[RISCV] Remove --riscv-no-aliases from RVV tests

This option serves no useful purpose other than to clutter things up. A
diff summary follows, as the real diff is extremely unwieldy:

   24844 -; CHECK-NEXT:    jalr zero, 0(ra)
   24844 +; CHECK-NEXT:    ret
       8 -; CHECK-NEXT:    vl4re8.v v28, (a0)
       8 +; CHECK-NEXT:    vl4r.v v28, (a0)
      64 -; CHECK-NEXT:    vl8re8.v v24, (a0)
      64 +; CHECK-NEXT:    vl8r.v v24, (a0)
     392 -; RUN:   --riscv-no-aliases < %s | FileCheck %s
     392 +; RUN:   < %s | FileCheck %s
       1 -; RUN:   -verify-machineinstrs --riscv-no-aliases < %s \
       1 +; RUN:   -verify-machineinstrs < %s \

As discussed in D103004.
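
For reference, a sketch of how a change like this can be reproduced
mechanically (the sed invocation and the build/bin/llc path are
assumptions, not part of this patch; only update_llc_test_checks.py is
referenced by the tests themselves):

   # Drop the flag from the RUN lines of the affected tests.
   sed -i 's/--riscv-no-aliases //' llvm/test/CodeGen/RISCV/rvv/*.ll
   # Regenerate the CHECK lines so they match the now-aliased output.
   llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
       llvm/test/CodeGen/RISCV/rvv/*.ll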
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
index bafdd31..aad8f23 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vaadd.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vaadd.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vaadd.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
index b6af949..981d1bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
index 0e3b779..8dfdfa7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
index 31fcfcf..4644d31 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
index b65e5bd..06ef128 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -562,7 +562,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -584,7 +584,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -606,7 +606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -628,7 +628,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -650,7 +650,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -672,7 +672,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -694,7 +694,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -716,7 +716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -738,7 +738,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -760,7 +760,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -782,7 +782,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -804,7 +804,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -826,7 +826,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -848,7 +848,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -870,7 +870,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -898,7 +898,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vadc.vvm v8, v8, v25, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -926,7 +926,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vadc.vvm v8, v8, v26, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -954,7 +954,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vadc.vvm v8, v8, v28, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -982,7 +982,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -998,7 +998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1014,7 +1014,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1030,7 +1030,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1046,7 +1046,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1062,7 +1062,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1078,7 +1078,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1094,7 +1094,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1110,7 +1110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1126,7 +1126,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1142,7 +1142,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1174,7 +1174,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1190,7 +1190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1206,7 +1206,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1222,7 +1222,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1238,7 +1238,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1270,7 +1270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1286,7 +1286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1302,7 +1302,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1318,7 +1318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1334,7 +1334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
index ac991d5..3d7756a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -562,7 +562,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -584,7 +584,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -606,7 +606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -628,7 +628,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -650,7 +650,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -672,7 +672,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -694,7 +694,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -716,7 +716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -738,7 +738,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -760,7 +760,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -782,7 +782,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -804,7 +804,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -826,7 +826,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -848,7 +848,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -870,7 +870,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -892,7 +892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -914,7 +914,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -936,7 +936,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -958,7 +958,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -974,7 +974,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -990,7 +990,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1006,7 +1006,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1022,7 +1022,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1038,7 +1038,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1054,7 +1054,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1086,7 +1086,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1102,7 +1102,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1118,7 +1118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1134,7 +1134,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1150,7 +1150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1166,7 +1166,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1198,7 +1198,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1214,7 +1214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1230,7 +1230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1246,7 +1246,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1262,7 +1262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1278,7 +1278,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1310,7 +1310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
index 12a47b6..187b279 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vadd.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vadd.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vadd.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2013,7 +2013,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2030,7 +2030,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2045,7 +2045,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2062,7 +2062,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2077,7 +2077,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2094,7 +2094,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2109,7 +2109,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2126,7 +2126,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2141,7 +2141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2158,7 +2158,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2173,7 +2173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2190,7 +2190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2205,7 +2205,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2222,7 +2222,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2237,7 +2237,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2254,7 +2254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2269,7 +2269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2286,7 +2286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2301,7 +2301,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2318,7 +2318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2333,7 +2333,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2350,7 +2350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2365,7 +2365,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2382,7 +2382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2397,7 +2397,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2414,7 +2414,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2429,7 +2429,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2446,7 +2446,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2461,7 +2461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2478,7 +2478,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2493,7 +2493,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2510,7 +2510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2525,7 +2525,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2542,7 +2542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2557,7 +2557,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2574,7 +2574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2589,7 +2589,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2606,7 +2606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2621,7 +2621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2638,7 +2638,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2653,7 +2653,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2670,7 +2670,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2685,7 +2685,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
index 1bedcca..b3c6f06 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll
index 87fa74d..723ef8b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll
index c36bd2b..2998e8b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll
index 0101347..313d205 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll
index 6b8046c..2af4dd7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll
index 9d7be0c..418b19f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll
index a8e6f95..d00eff0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll
index 3a0d130..dba4340 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll
index cc23e7d..99af9fc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll
index cff18fb..fc4478a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll
index 03c9311..86652af 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll
index 5937304..f4ec68e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll
index 16083d6..41e1763 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll
index 4cfdbb0..4065a91 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll
index e12c2e2..c4711cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll
index 6e25c14..92dd361 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64(
     <vscale x 1 x float> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64(
     <vscale x 2 x float> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64(
     <vscale x 4 x float> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64(
     <vscale x 8 x float> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64(
     <vscale x 1 x double> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64(
     <vscale x 2 x double> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64(
     <vscale x 4 x double> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64(
     <vscale x 8 x double> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32(
     <vscale x 1 x float> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32(
     <vscale x 2 x float> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32(
     <vscale x 4 x float> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32(
     <vscale x 8 x float> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32(
     <vscale x 16 x float> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32(
     <vscale x 1 x double> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32(
     <vscale x 2 x double> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32(
     <vscale x 4 x double> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32(
     <vscale x 8 x double> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -1693,7 +1693,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -1717,7 +1717,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -1741,7 +1741,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -1765,7 +1765,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -1789,7 +1789,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1813,7 +1813,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1837,7 +1837,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1861,7 +1861,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1885,7 +1885,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1909,7 +1909,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1933,7 +1933,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1957,7 +1957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -2005,7 +2005,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -2029,7 +2029,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -2053,7 +2053,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -2077,7 +2077,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16(
     <vscale x 1 x float> *%0,
@@ -2101,7 +2101,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> *%0,
@@ -2125,7 +2125,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16(
     <vscale x 2 x float> *%0,
@@ -2149,7 +2149,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> *%0,
@@ -2173,7 +2173,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16(
     <vscale x 4 x float> *%0,
@@ -2197,7 +2197,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> *%0,
@@ -2221,7 +2221,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16(
     <vscale x 8 x float> *%0,
@@ -2245,7 +2245,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> *%0,
@@ -2269,7 +2269,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16(
     <vscale x 16 x float> *%0,
@@ -2293,7 +2293,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> *%0,
@@ -2317,7 +2317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16(
     <vscale x 1 x double> *%0,
@@ -2341,7 +2341,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> *%0,
@@ -2365,7 +2365,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16(
     <vscale x 2 x double> *%0,
@@ -2389,7 +2389,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> *%0,
@@ -2413,7 +2413,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16(
     <vscale x 4 x double> *%0,
@@ -2437,7 +2437,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> *%0,
@@ -2461,7 +2461,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16(
     <vscale x 8 x double> *%0,
@@ -2485,7 +2485,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> *%0,
@@ -2509,7 +2509,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -2533,7 +2533,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -2557,7 +2557,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -2581,7 +2581,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -2605,7 +2605,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -2629,7 +2629,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -2653,7 +2653,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -2677,7 +2677,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -2701,7 +2701,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -2725,7 +2725,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -2749,7 +2749,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -2773,7 +2773,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -2797,7 +2797,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -2821,7 +2821,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -2845,7 +2845,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -2869,7 +2869,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -2893,7 +2893,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -2917,7 +2917,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -2941,7 +2941,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8(
     <vscale x 1 x float> *%0,
@@ -2965,7 +2965,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> *%0,
@@ -2989,7 +2989,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8(
     <vscale x 2 x float> *%0,
@@ -3013,7 +3013,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> *%0,
@@ -3037,7 +3037,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8(
     <vscale x 4 x float> *%0,
@@ -3061,7 +3061,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> *%0,
@@ -3085,7 +3085,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8(
     <vscale x 8 x float> *%0,
@@ -3109,7 +3109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> *%0,
@@ -3133,7 +3133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8(
     <vscale x 16 x float> *%0,
@@ -3157,7 +3157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> *%0,
@@ -3181,7 +3181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8(
     <vscale x 1 x double> *%0,
@@ -3205,7 +3205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> *%0,
@@ -3229,7 +3229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8(
     <vscale x 2 x double> *%0,
@@ -3253,7 +3253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> *%0,
@@ -3277,7 +3277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8(
     <vscale x 4 x double> *%0,
@@ -3301,7 +3301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> *%0,
@@ -3325,7 +3325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8(
     <vscale x 8 x double> *%0,
@@ -3349,7 +3349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll
index 043d80d..52989f3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64(
     <vscale x 1 x float> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64(
     <vscale x 2 x float> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64(
     <vscale x 4 x float> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64(
     <vscale x 8 x float> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64(
     <vscale x 1 x double> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64(
     <vscale x 2 x double> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64(
     <vscale x 4 x double> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64(
     <vscale x 8 x double> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32(
     <vscale x 1 x float> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32(
     <vscale x 2 x float> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32(
     <vscale x 4 x float> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32(
     <vscale x 8 x float> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32(
     <vscale x 16 x float> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32(
     <vscale x 1 x double> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32(
     <vscale x 2 x double> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32(
     <vscale x 4 x double> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32(
     <vscale x 8 x double> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -1693,7 +1693,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -1717,7 +1717,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -1741,7 +1741,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -1765,7 +1765,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -1789,7 +1789,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1813,7 +1813,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1837,7 +1837,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1861,7 +1861,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1885,7 +1885,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1909,7 +1909,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1933,7 +1933,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1957,7 +1957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -2005,7 +2005,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -2029,7 +2029,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -2053,7 +2053,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -2077,7 +2077,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16(
     <vscale x 1 x float> *%0,
@@ -2101,7 +2101,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> *%0,
@@ -2125,7 +2125,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16(
     <vscale x 2 x float> *%0,
@@ -2149,7 +2149,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> *%0,
@@ -2173,7 +2173,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16(
     <vscale x 4 x float> *%0,
@@ -2197,7 +2197,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> *%0,
@@ -2221,7 +2221,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16(
     <vscale x 8 x float> *%0,
@@ -2245,7 +2245,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> *%0,
@@ -2269,7 +2269,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16(
     <vscale x 16 x float> *%0,
@@ -2293,7 +2293,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> *%0,
@@ -2317,7 +2317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16(
     <vscale x 1 x double> *%0,
@@ -2341,7 +2341,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> *%0,
@@ -2365,7 +2365,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16(
     <vscale x 2 x double> *%0,
@@ -2389,7 +2389,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> *%0,
@@ -2413,7 +2413,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16(
     <vscale x 4 x double> *%0,
@@ -2437,7 +2437,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> *%0,
@@ -2461,7 +2461,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16(
     <vscale x 8 x double> *%0,
@@ -2485,7 +2485,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> *%0,
@@ -2509,7 +2509,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -2533,7 +2533,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -2557,7 +2557,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -2581,7 +2581,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -2605,7 +2605,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -2629,7 +2629,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -2653,7 +2653,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -2677,7 +2677,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -2701,7 +2701,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -2725,7 +2725,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -2749,7 +2749,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -2773,7 +2773,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -2797,7 +2797,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -2821,7 +2821,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -2845,7 +2845,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -2869,7 +2869,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -2893,7 +2893,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -2917,7 +2917,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -2941,7 +2941,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8(
     <vscale x 1 x float> *%0,
@@ -2965,7 +2965,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> *%0,
@@ -2989,7 +2989,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8(
     <vscale x 2 x float> *%0,
@@ -3013,7 +3013,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> *%0,
@@ -3037,7 +3037,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8(
     <vscale x 4 x float> *%0,
@@ -3061,7 +3061,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> *%0,
@@ -3085,7 +3085,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8(
     <vscale x 8 x float> *%0,
@@ -3109,7 +3109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> *%0,
@@ -3133,7 +3133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8(
     <vscale x 16 x float> *%0,
@@ -3157,7 +3157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> *%0,
@@ -3181,7 +3181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8(
     <vscale x 1 x double> *%0,
@@ -3205,7 +3205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> *%0,
@@ -3229,7 +3229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8(
     <vscale x 2 x double> *%0,
@@ -3253,7 +3253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> *%0,
@@ -3277,7 +3277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8(
     <vscale x 4 x double> *%0,
@@ -3301,7 +3301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> *%0,
@@ -3325,7 +3325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8(
     <vscale x 8 x double> *%0,
@@ -3349,7 +3349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll
index fe48785..5061ea5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll
index ce250b0..f732b98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64(
   <vscale x 1 x i32>*,
   <vscale x 1 x i64>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> *%0,
@@ -61,7 +61,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -85,7 +85,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> *%0,
@@ -109,7 +109,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -133,7 +133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> *%0,
@@ -157,7 +157,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -181,7 +181,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> *%0,
@@ -205,7 +205,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -229,7 +229,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> *%0,
@@ -253,7 +253,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -277,7 +277,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> *%0,
@@ -301,7 +301,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -325,7 +325,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> *%0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> *%0,
@@ -397,7 +397,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> *%0,
@@ -445,7 +445,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -469,7 +469,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> *%0,
@@ -493,7 +493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -517,7 +517,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> *%0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> *%0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> *%0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> *%0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> *%0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> *%0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> *%0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> *%0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> *%0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> *%0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> *%0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> *%0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> *%0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> *%0,
@@ -1165,7 +1165,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> *%0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1237,7 +1237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> *%0,
@@ -1261,7 +1261,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1285,7 +1285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> *%0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1333,7 +1333,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> *%0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1381,7 +1381,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> *%0,
@@ -1405,7 +1405,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1429,7 +1429,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> *%0,
@@ -1453,7 +1453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> *%0,
@@ -1501,7 +1501,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> *%0,
@@ -1549,7 +1549,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1573,7 +1573,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> *%0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12, v0.t
 ; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> *%0,
@@ -1645,7 +1645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16, v0.t
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> *%0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
index f0ebddf..40272c1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vand.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vand.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vand.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vand.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2013,7 +2013,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2030,7 +2030,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2045,7 +2045,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2062,7 +2062,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2077,7 +2077,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2094,7 +2094,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2109,7 +2109,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2126,7 +2126,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2141,7 +2141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2158,7 +2158,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2173,7 +2173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2190,7 +2190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2205,7 +2205,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2222,7 +2222,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2237,7 +2237,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2254,7 +2254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2269,7 +2269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2286,7 +2286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2301,7 +2301,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2318,7 +2318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2333,7 +2333,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2350,7 +2350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2365,7 +2365,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2382,7 +2382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2397,7 +2397,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2414,7 +2414,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2429,7 +2429,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2446,7 +2446,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2461,7 +2461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2478,7 +2478,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2493,7 +2493,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2510,7 +2510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2525,7 +2525,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2542,7 +2542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2557,7 +2557,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2574,7 +2574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2589,7 +2589,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2606,7 +2606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2621,7 +2621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2638,7 +2638,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2653,7 +2653,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2670,7 +2670,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2685,7 +2685,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
index 94c63dd9..ce8677b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vand.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vand.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vand.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
index 37f46aa..04bbf5c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vasub.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vasub.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vasub.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
index b097a34..0759e15 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
index d26c5e4..84f6944 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vasubu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vasubu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vasubu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
index 0d14aec..f41d9fa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
index 2f155d9..d5bfa27 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(
     <vscale x 1 x half> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16(
     <vscale x 2 x half> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16(
     <vscale x 4 x half> %0,
@@ -562,7 +562,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16(
     <vscale x 8 x half> %0,
@@ -584,7 +584,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16(
     <vscale x 16 x half> %0,
@@ -606,7 +606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16(
     <vscale x 32 x half> %0,
@@ -628,7 +628,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32(
     <vscale x 1 x float> %0,
@@ -650,7 +650,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32(
     <vscale x 2 x float> %0,
@@ -672,7 +672,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32(
     <vscale x 4 x float> %0,
@@ -694,7 +694,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32(
     <vscale x 8 x float> %0,
@@ -716,7 +716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32(
     <vscale x 16 x float> %0,
@@ -738,7 +738,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64(
     <vscale x 1 x double> %0,
@@ -760,7 +760,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64(
     <vscale x 2 x double> %0,
@@ -782,7 +782,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64(
     <vscale x 4 x double> %0,
@@ -804,7 +804,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64(
     <vscale x 8 x double> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vcompress.vm v25, v8, v0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
     <vscale x 1 x i8> undef,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
index 1dcdcab..ce67123 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(
     <vscale x 1 x half> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16(
     <vscale x 2 x half> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16(
     <vscale x 4 x half> %0,
@@ -562,7 +562,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16(
     <vscale x 8 x half> %0,
@@ -584,7 +584,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16(
     <vscale x 16 x half> %0,
@@ -606,7 +606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16(
     <vscale x 32 x half> %0,
@@ -628,7 +628,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32(
     <vscale x 1 x float> %0,
@@ -650,7 +650,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32(
     <vscale x 2 x float> %0,
@@ -672,7 +672,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32(
     <vscale x 4 x float> %0,
@@ -694,7 +694,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32(
     <vscale x 8 x float> %0,
@@ -716,7 +716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32(
     <vscale x 16 x float> %0,
@@ -738,7 +738,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64(
     <vscale x 1 x double> %0,
@@ -760,7 +760,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64(
     <vscale x 2 x double> %0,
@@ -782,7 +782,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64(
     <vscale x 4 x double> %0,
@@ -804,7 +804,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64(
     <vscale x 8 x double> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vcompress.vm v25, v8, v0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
     <vscale x 1 x i8> undef,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
index 11729fd..eab4d15 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vdiv.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vdiv.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vdiv.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
index 99ce81c..97478e4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vdiv.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vdiv.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
index a4725ad..8174e21a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vdivu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vdivu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vdivu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
index f2b8b85..7cd6d03 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vdivu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vdivu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
index 9366b83..1aad81f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
index b20c3be..7052ae2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \
 ; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -56,7 +56,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -78,7 +78,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -122,7 +122,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -144,7 +144,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -166,7 +166,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -232,7 +232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -255,7 +255,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -299,7 +299,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -321,7 +321,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -343,7 +343,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -365,7 +365,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -387,7 +387,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -409,7 +409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -431,7 +431,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -476,7 +476,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -498,7 +498,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -520,7 +520,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -542,7 +542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -586,7 +586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -608,7 +608,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -630,7 +630,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -653,7 +653,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -676,7 +676,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -699,7 +699,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -722,7 +722,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -745,7 +745,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -768,7 +768,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -791,7 +791,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -814,7 +814,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -837,7 +837,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -860,7 +860,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -883,7 +883,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -906,7 +906,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -929,7 +929,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -952,7 +952,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -975,7 +975,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -998,7 +998,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1044,7 +1044,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1067,7 +1067,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1090,7 +1090,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1136,7 +1136,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1205,7 +1205,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1228,7 +1228,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1251,7 +1251,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1274,7 +1274,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1297,7 +1297,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfadd.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1343,7 +1343,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll
index 62ce43bf..028fe20 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x half> %0,
   i32 %1) nounwind {
 entry:
@@ -32,7 +32,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x i16> %0,
   <vscale x 1 x half> %1,
   <vscale x 1 x i1> %2,
@@ -56,7 +56,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x half> %0,
   i32 %1) nounwind {
 entry:
@@ -78,7 +78,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x i16> %0,
   <vscale x 2 x half> %1,
   <vscale x 2 x i1> %2,
@@ -102,7 +102,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x half> %0,
   i32 %1) nounwind {
 entry:
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x i16> %0,
   <vscale x 4 x half> %1,
   <vscale x 4 x i1> %2,
@@ -148,7 +148,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x half> %0,
   i32 %1) nounwind {
 entry:
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x i16> %0,
   <vscale x 8 x half> %1,
   <vscale x 8 x i1> %2,
@@ -194,7 +194,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x half> %0,
   i32 %1) nounwind {
 entry:
@@ -216,7 +216,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x i16> %0,
   <vscale x 16 x half> %1,
   <vscale x 16 x i1> %2,
@@ -240,7 +240,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 32 x half> %0,
   i32 %1) nounwind {
 entry:
@@ -262,7 +262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 32 x i16> %0,
   <vscale x 32 x half> %1,
   <vscale x 32 x i1> %2,
@@ -286,7 +286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x float> %0,
   i32 %1) nounwind {
 entry:
@@ -308,7 +308,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x i32> %0,
   <vscale x 1 x float> %1,
   <vscale x 1 x i1> %2,
@@ -332,7 +332,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x float> %0,
   i32 %1) nounwind {
 entry:
@@ -354,7 +354,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x i32> %0,
   <vscale x 2 x float> %1,
   <vscale x 2 x i1> %2,
@@ -378,7 +378,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x float> %0,
   i32 %1) nounwind {
 entry:
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x i32> %0,
   <vscale x 4 x float> %1,
   <vscale x 4 x i1> %2,
@@ -424,7 +424,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x float> %0,
   i32 %1) nounwind {
 entry:
@@ -446,7 +446,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x i32> %0,
   <vscale x 8 x float> %1,
   <vscale x 8 x i1> %2,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x float> %0,
   i32 %1) nounwind {
 entry:
@@ -492,7 +492,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x i32> %0,
   <vscale x 16 x float> %1,
   <vscale x 16 x i1> %2,
@@ -516,7 +516,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x double> %0,
   i32 %1) nounwind {
 entry:
@@ -538,7 +538,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x i64> %0,
   <vscale x 1 x double> %1,
   <vscale x 1 x i1> %2,
@@ -562,7 +562,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x double> %0,
   i32 %1) nounwind {
 entry:
@@ -584,7 +584,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x i64> %0,
   <vscale x 2 x double> %1,
   <vscale x 2 x i1> %2,
@@ -608,7 +608,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x double> %0,
   i32 %1) nounwind {
 entry:
@@ -630,7 +630,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x i64> %0,
   <vscale x 4 x double> %1,
   <vscale x 4 x i1> %2,
@@ -654,7 +654,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x double> %0,
   i32 %1) nounwind {
 entry:
@@ -676,7 +676,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x i64> %0,
   <vscale x 8 x double> %1,
   <vscale x 8 x i1> %2,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll
index 00977cb..2b7f671 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -32,7 +32,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x i16> %0,
   <vscale x 1 x half> %1,
   <vscale x 1 x i1> %2,
@@ -56,7 +56,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -78,7 +78,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x i16> %0,
   <vscale x 2 x half> %1,
   <vscale x 2 x i1> %2,
@@ -102,7 +102,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x i16> %0,
   <vscale x 4 x half> %1,
   <vscale x 4 x i1> %2,
@@ -148,7 +148,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x i16> %0,
   <vscale x 8 x half> %1,
   <vscale x 8 x i1> %2,
@@ -194,7 +194,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -216,7 +216,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x i16> %0,
   <vscale x 16 x half> %1,
   <vscale x 16 x i1> %2,
@@ -240,7 +240,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 32 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -262,7 +262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 32 x i16> %0,
   <vscale x 32 x half> %1,
   <vscale x 32 x i1> %2,
@@ -286,7 +286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -308,7 +308,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x i32> %0,
   <vscale x 1 x float> %1,
   <vscale x 1 x i1> %2,
@@ -332,7 +332,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -354,7 +354,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x i32> %0,
   <vscale x 2 x float> %1,
   <vscale x 2 x i1> %2,
@@ -378,7 +378,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x i32> %0,
   <vscale x 4 x float> %1,
   <vscale x 4 x i1> %2,
@@ -424,7 +424,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -446,7 +446,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x i32> %0,
   <vscale x 8 x float> %1,
   <vscale x 8 x i1> %2,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -492,7 +492,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x i32> %0,
   <vscale x 16 x float> %1,
   <vscale x 16 x i1> %2,
@@ -516,7 +516,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -538,7 +538,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x i64> %0,
   <vscale x 1 x double> %1,
   <vscale x 1 x i1> %2,
@@ -562,7 +562,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -584,7 +584,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x i64> %0,
   <vscale x 2 x double> %1,
   <vscale x 2 x i1> %2,
@@ -608,7 +608,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -630,7 +630,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x i64> %0,
   <vscale x 4 x double> %1,
   <vscale x 4 x i1> %2,
@@ -654,7 +654,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfclass.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -676,7 +676,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfclass.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x i64> %0,
   <vscale x 8 x double> %1,
   <vscale x 8 x i1> %2,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll
index af453fa..ecf15bf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
   <vscale x 1 x i16>,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll
index a7c94da..dec3d3c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
   <vscale x 1 x i16>,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll
index f7bd966..2d23526 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
   <vscale x 1 x i16>,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll
index 7d15333..bdcf80c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
   <vscale x 1 x i16>,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll
index b7f66f0..dda4f9f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll
index 89ef4c7..98bc96f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll
index 6200daf..0a0f817 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll
index 6ac117b..e80ba35 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll
index 63eb153..40ca84c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll
index 09fe017..b0421e4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll
index 175289c..a857d90 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll
index 6b0e1a6..a061c7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
     <vscale x 1 x i16> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16(
     <vscale x 2 x i16> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16(
     <vscale x 4 x i16> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16(
     <vscale x 8 x i16> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16(
     <vscale x 16 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16(
     <vscale x 32 x i16> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32(
     <vscale x 1 x i32> %0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32(
     <vscale x 2 x i32> %0,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32(
     <vscale x 4 x i32> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32(
     <vscale x 8 x i32> %0,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32(
     <vscale x 16 x i32> %0,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64(
     <vscale x 1 x i64> %0,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64(
     <vscale x 2 x i64> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64(
     <vscale x 4 x i64> %0,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
index 884e8a7..c2c9355 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
index 9fe5ebb..cb2a32c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1250,7 +1250,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1273,7 +1273,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1296,7 +1296,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1319,7 +1319,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1342,7 +1342,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
index dfe2dd4..88b6f06 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare i32 @llvm.riscv.vfirst.i32.nxv1i1(
   <vscale x 1 x i1>,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.i32.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.i32.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.i32.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.i32.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -151,7 +151,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.i32.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -191,7 +191,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.i32.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -231,7 +231,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.i32.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -271,7 +271,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll
index 08526b8..d5edf79 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare i64 @llvm.riscv.vfirst.i64.nxv1i1(
   <vscale x 1 x i1>,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.i64.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.i64.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.i64.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.i64.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -151,7 +151,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.i64.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -191,7 +191,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.i64.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -231,7 +231,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vfirst.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.i64.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -271,7 +271,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vfirst.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vfirst.mask.i64.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
index cce88c5..57e274a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
index 0c62230..55476ca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
index 61f3f93..50b3356 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
index 3c6c540..88f5c38 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
index 967c735..0140b73 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
index d6f60fd..0af1d1a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1250,7 +1250,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1273,7 +1273,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1296,7 +1296,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1319,7 +1319,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmax.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1342,7 +1342,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmax.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll
index ade80c3..4b1dbb5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -57,7 +57,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -80,7 +80,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -102,7 +102,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -125,7 +125,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -147,7 +147,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -170,7 +170,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -192,7 +192,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -215,7 +215,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -237,7 +237,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -260,7 +260,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -282,7 +282,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -305,7 +305,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -327,7 +327,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -350,7 +350,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -372,7 +372,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -395,7 +395,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -417,7 +417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -440,7 +440,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -462,7 +462,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -485,7 +485,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -507,7 +507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -534,7 +534,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -556,7 +556,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -583,7 +583,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -632,7 +632,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -654,7 +654,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -681,7 +681,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -697,7 +697,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -713,7 +713,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -729,7 +729,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -745,7 +745,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -777,7 +777,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -793,7 +793,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -809,7 +809,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -825,7 +825,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -841,7 +841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -857,7 +857,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -873,7 +873,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -905,7 +905,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -921,7 +921,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll
index ce80e8b..042b8e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -57,7 +57,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -80,7 +80,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -102,7 +102,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -125,7 +125,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -147,7 +147,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -170,7 +170,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -192,7 +192,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -215,7 +215,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -237,7 +237,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -260,7 +260,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -282,7 +282,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -305,7 +305,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -327,7 +327,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -350,7 +350,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -372,7 +372,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -395,7 +395,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -417,7 +417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -440,7 +440,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -462,7 +462,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -485,7 +485,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -507,7 +507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -530,7 +530,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -552,7 +552,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -575,7 +575,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -597,7 +597,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -620,7 +620,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -642,7 +642,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -665,7 +665,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmerge.vfm v8, v8, ft0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -681,7 +681,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -697,7 +697,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -713,7 +713,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -729,7 +729,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -745,7 +745,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -777,7 +777,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -793,7 +793,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -809,7 +809,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -825,7 +825,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -841,7 +841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -857,7 +857,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -873,7 +873,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -905,7 +905,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
index 6e75e42..376b95a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
index 4365034..a64f890 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmin.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmin.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmin.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmin.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmin.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1250,7 +1250,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1273,7 +1273,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1296,7 +1296,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1319,7 +1319,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmin.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1342,7 +1342,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmin.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
index c7be66b..43788ff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
index e138e18..b2ad8d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
index 333fcce..732e410 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
index 3badef2..05ceede 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
index 1a8521f..574e49d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
index b285e36..4f514ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1250,7 +1250,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1273,7 +1273,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1296,7 +1296,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1319,7 +1319,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmul.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1342,7 +1342,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
index c6567d2..f91b0bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -target-abi ilp32d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
   half,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half %0,
@@ -28,7 +28,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half %0,
@@ -46,7 +46,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half %0,
@@ -64,7 +64,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half %0,
@@ -82,7 +82,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half %0,
@@ -118,7 +118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float %0,
@@ -136,7 +136,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float %0,
@@ -172,7 +172,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float %0,
@@ -208,7 +208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double %0,
@@ -226,7 +226,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double %0,
@@ -244,7 +244,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double %0,
@@ -262,7 +262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half 0.0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half 0.0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half 0.0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half 0.0,
@@ -332,7 +332,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half 0.0,
@@ -346,7 +346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half 0.0,
@@ -360,7 +360,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float 0.0,
@@ -374,7 +374,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float 0.0,
@@ -388,7 +388,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float 0.0,
@@ -402,7 +402,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float 0.0,
@@ -416,7 +416,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float 0.0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double 0.0,
@@ -444,7 +444,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double 0.0,
@@ -458,7 +458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double 0.0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double 0.0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
index 19df844..17e393b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -target-abi lp64d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
   half,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half %0,
@@ -28,7 +28,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half %0,
@@ -46,7 +46,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half %0,
@@ -64,7 +64,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half %0,
@@ -82,7 +82,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half %0,
@@ -118,7 +118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float %0,
@@ -136,7 +136,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float %0,
@@ -172,7 +172,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float %0,
@@ -208,7 +208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double %0,
@@ -226,7 +226,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double %0,
@@ -244,7 +244,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double %0,
@@ -262,7 +262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfmv.v.f v8, fa0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half 0.0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half 0.0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half 0.0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half 0.0,
@@ -332,7 +332,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half 0.0,
@@ -346,7 +346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half 0.0,
@@ -360,7 +360,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float 0.0,
@@ -374,7 +374,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float 0.0,
@@ -388,7 +388,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float 0.0,
@@ -402,7 +402,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float 0.0,
@@ -416,7 +416,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float 0.0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double 0.0,
@@ -444,7 +444,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double 0.0,
@@ -458,7 +458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double 0.0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double 0.0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
index ec49899b..54a464e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
   <vscale x 1 x float>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64(
     <vscale x 8 x float> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll
index 62d3433..f8edd41 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
   <vscale x 1 x float>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64(
     <vscale x 8 x float> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll
index cdc4494..ed6ec6f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
   <vscale x 1 x i32>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll
index 0ae8e52..680d214 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
   <vscale x 1 x i32>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.x.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll
index ae6326f..5890c7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
   <vscale x 1 x i32>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll
index 3d94b5b..c94b3d5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
   <vscale x 1 x i32>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.f.xu.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll
index a121d8a..c28ed78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
   <vscale x 1 x float>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64(
     <vscale x 8 x float> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll
index e9ce1f4..1169526 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
   <vscale x 1 x float>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64(
     <vscale x 1 x float> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64(
     <vscale x 2 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64(
     <vscale x 4 x float> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64(
     <vscale x 8 x float> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll
index c8b5bf2..ff99af7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll
index 6d3f753..817d964 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll
index 313ccb4..0435383 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll
index 0673d75..4bbb96d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll
index eec2899..77d868b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll
index d9ffdea..aae7755 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll
index 2d7140f..dcb2c1e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll
index 7b0d392..f445938 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16(
     <vscale x 2 x i8> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16(
     <vscale x 4 x i8> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16(
     <vscale x 8 x i8> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16(
     <vscale x 16 x i8> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16(
     <vscale x 32 x half> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16(
     <vscale x 32 x i8> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32(
     <vscale x 1 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32(
     <vscale x 1 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32(
     <vscale x 2 x i16> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32(
     <vscale x 4 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32(
     <vscale x 8 x float> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32(
     <vscale x 8 x i16> %0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32(
     <vscale x 16 x float> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32(
     <vscale x 16 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64(
     <vscale x 1 x i32> %0,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64(
     <vscale x 2 x double> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64(
     <vscale x 2 x i32> %0,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64(
     <vscale x 4 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64(
     <vscale x 4 x i32> %0,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
     <vscale x 8 x double> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
index c572d45..563dbbe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
index 693c736..51b9dd1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
index c91996f..1cb14fe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
index afd7e7c..95872af 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmadd.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
index 1f12513..718cd37 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
index 84b42e6..33dada4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
index ae477b7..edc840c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1029,7 +1029,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1057,7 +1057,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
index d554b4b..db2a165 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -613,7 +613,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -637,7 +637,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -709,7 +709,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -733,7 +733,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -757,7 +757,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -781,7 +781,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -829,7 +829,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -853,7 +853,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -877,7 +877,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -901,7 +901,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -925,7 +925,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1045,7 +1045,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1117,7 +1117,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfnmsub.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
index 36b095e..77ce843 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -265,7 +265,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -288,7 +288,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -449,7 +449,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -472,7 +472,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -495,7 +495,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -522,7 +522,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -549,7 +549,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -576,7 +576,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -603,7 +603,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -630,7 +630,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -657,7 +657,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -684,7 +684,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
index 1cf8176..52d9bd8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -265,7 +265,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -288,7 +288,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -449,7 +449,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -472,7 +472,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -495,7 +495,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -518,7 +518,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -587,7 +587,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -610,7 +610,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -633,7 +633,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -656,7 +656,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -679,7 +679,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrdiv.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll
index 244903f..cb61dda 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
     <vscale x 1 x half> %1,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16(
     <vscale x 2 x half> %1,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16(
     <vscale x 4 x half> %1,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16(
     <vscale x 8 x half> %1,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16(
     <vscale x 16 x half> %1,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16(
     <vscale x 32 x half> %1,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32(
     <vscale x 1 x float> %1,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32(
     <vscale x 2 x float> %1,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32(
     <vscale x 4 x float> %1,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32(
     <vscale x 8 x float> %1,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32(
     <vscale x 16 x float> %1,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64(
     <vscale x 1 x double> %1,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64(
     <vscale x 2 x double> %1,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64(
     <vscale x 4 x double> %1,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64(
     <vscale x 8 x double> %1,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll
index 7b24fb9..786d363 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
     <vscale x 1 x half> %1,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrec7.mask.nxv2f16(
     <vscale x 2 x half> %1,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrec7.mask.nxv4f16(
     <vscale x 4 x half> %1,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrec7.mask.nxv8f16(
     <vscale x 8 x half> %1,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrec7.mask.nxv16f16(
     <vscale x 16 x half> %1,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrec7.mask.nxv32f16(
     <vscale x 32 x half> %1,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32(
     <vscale x 1 x float> %1,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32(
     <vscale x 2 x float> %1,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32(
     <vscale x 4 x float> %1,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32(
     <vscale x 8 x float> %1,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32(
     <vscale x 16 x float> %1,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64(
     <vscale x 1 x double> %1,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64(
     <vscale x 2 x double> %1,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64(
     <vscale x 4 x double> %1,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrec7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrec7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64(
     <vscale x 8 x double> %1,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
index 02f1586..b9366cd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1(
     <vscale x 1 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
index 9521369..947db38 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
index 6a4ee69..be34c49 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1(
     <vscale x 1 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
index 2a049c2..c34e654 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
index ba9c55f..64f498c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1(
     <vscale x 1 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
index 8b08fe5..4066612 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
index 3317144..74811df 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.nxv1i1(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.nxv2i1(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.nxv4i1(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.nxv8i1(
     <vscale x 1 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
index 9b706b5..5d6397e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll
index 2740ecf..79154fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
     <vscale x 1 x half> %1,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.mask.nxv2f16(
     <vscale x 2 x half> %1,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.mask.nxv4f16(
     <vscale x 4 x half> %1,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.mask.nxv8f16(
     <vscale x 8 x half> %1,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.mask.nxv16f16(
     <vscale x 16 x half> %1,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.mask.nxv32f16(
     <vscale x 32 x half> %1,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32(
     <vscale x 1 x float> %1,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.mask.nxv2f32(
     <vscale x 2 x float> %1,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.mask.nxv4f32(
     <vscale x 4 x float> %1,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.mask.nxv8f32(
     <vscale x 8 x float> %1,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.mask.nxv16f32(
     <vscale x 16 x float> %1,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.mask.nxv1f64(
     <vscale x 1 x double> %1,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.mask.nxv2f64(
     <vscale x 2 x double> %1,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.mask.nxv4f64(
     <vscale x 4 x double> %1,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.mask.nxv8f64(
     <vscale x 8 x double> %1,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll
index 3ea0f1d..41053b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
     <vscale x 1 x half> %1,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.mask.nxv2f16(
     <vscale x 2 x half> %1,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.mask.nxv4f16(
     <vscale x 4 x half> %1,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.mask.nxv8f16(
     <vscale x 8 x half> %1,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.mask.nxv16f16(
     <vscale x 16 x half> %1,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16(
     <vscale x 32 x half> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.mask.nxv32f16(
     <vscale x 32 x half> %1,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32(
     <vscale x 1 x float> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32(
     <vscale x 1 x float> %1,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32(
     <vscale x 2 x float> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.mask.nxv2f32(
     <vscale x 2 x float> %1,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32(
     <vscale x 4 x float> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.mask.nxv4f32(
     <vscale x 4 x float> %1,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32(
     <vscale x 8 x float> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.mask.nxv8f32(
     <vscale x 8 x float> %1,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32(
     <vscale x 16 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.mask.nxv16f32(
     <vscale x 16 x float> %1,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64(
     <vscale x 1 x double> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.mask.nxv1f64(
     <vscale x 1 x double> %1,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64(
     <vscale x 2 x double> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.mask.nxv2f64(
     <vscale x 2 x double> %1,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64(
     <vscale x 4 x double> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.mask.nxv4f64(
     <vscale x 4 x double> %1,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64(
     <vscale x 8 x double> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.mask.nxv8f64(
     <vscale x 8 x double> %1,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
index 90da0d1..62886f6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -265,7 +265,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -288,7 +288,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -449,7 +449,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -472,7 +472,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -495,7 +495,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -522,7 +522,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -549,7 +549,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -576,7 +576,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -603,7 +603,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -630,7 +630,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -657,7 +657,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -684,7 +684,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll
index be75554..a398630 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \
 ; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -36,7 +36,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -59,7 +59,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -82,7 +82,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -105,7 +105,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -128,7 +128,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -151,7 +151,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -174,7 +174,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -197,7 +197,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -220,7 +220,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -243,7 +243,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -266,7 +266,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -289,7 +289,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -312,7 +312,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -335,7 +335,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -358,7 +358,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -381,7 +381,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -404,7 +404,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -427,7 +427,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -450,7 +450,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -473,7 +473,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -496,7 +496,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -519,7 +519,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -542,7 +542,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -588,7 +588,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -611,7 +611,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -634,7 +634,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -657,7 +657,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -680,7 +680,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfrsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
index 80c04b0..394f653 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
index 733e7a8..b6c4d7f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1250,7 +1250,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1273,7 +1273,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1296,7 +1296,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1319,7 +1319,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1342,7 +1342,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnj.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
index 087933b..90b74bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
index 392d16b..1283a20 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1250,7 +1250,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1273,7 +1273,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1296,7 +1296,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1319,7 +1319,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1342,7 +1342,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjn.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
index ee481d2..3d31a50 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
index 17c262c..4484d5a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1250,7 +1250,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1273,7 +1273,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1296,7 +1296,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1319,7 +1319,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1342,7 +1342,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsgnjx.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
index ec5f182..dbc23c2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -265,7 +265,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -288,7 +288,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -449,7 +449,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -472,7 +472,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -495,7 +495,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -522,7 +522,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -549,7 +549,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -576,7 +576,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -603,7 +603,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -630,7 +630,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -657,7 +657,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -684,7 +684,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
index 882418a..a8b8dae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -35,7 +35,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -58,7 +58,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -81,7 +81,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -104,7 +104,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -127,7 +127,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -173,7 +173,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -196,7 +196,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -219,7 +219,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -242,7 +242,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -265,7 +265,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -288,7 +288,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -311,7 +311,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -334,7 +334,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -357,7 +357,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -403,7 +403,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -426,7 +426,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -449,7 +449,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -472,7 +472,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -495,7 +495,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -518,7 +518,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -587,7 +587,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -610,7 +610,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -633,7 +633,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -656,7 +656,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -679,7 +679,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfslide1down.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
index be07b01..68a0d02 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -36,7 +36,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -60,7 +60,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -83,7 +83,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -107,7 +107,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -130,7 +130,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -177,7 +177,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -201,7 +201,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -224,7 +224,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -248,7 +248,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -271,7 +271,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -295,7 +295,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -365,7 +365,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -389,7 +389,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -412,7 +412,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -436,7 +436,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -459,7 +459,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -483,7 +483,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -506,7 +506,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -534,7 +534,7 @@
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -561,7 +561,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -589,7 +589,7 @@
 ; CHECK-NEXT:    vfslide1up.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -616,7 +616,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -644,7 +644,7 @@
 ; CHECK-NEXT:    vfslide1up.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -671,7 +671,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -699,7 +699,7 @@
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -726,7 +726,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
index 0fb2215..7c1ac30 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -36,7 +36,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -60,7 +60,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -83,7 +83,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -107,7 +107,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -130,7 +130,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -177,7 +177,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -201,7 +201,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -224,7 +224,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -248,7 +248,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -271,7 +271,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -295,7 +295,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -365,7 +365,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -389,7 +389,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -412,7 +412,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -436,7 +436,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -459,7 +459,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -483,7 +483,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -506,7 +506,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -530,7 +530,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -553,7 +553,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -577,7 +577,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -600,7 +600,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -624,7 +624,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -647,7 +647,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -671,7 +671,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -694,7 +694,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
index c274712..4c405352 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
     <vscale x 1 x half> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
     <vscale x 1 x half> %1,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
     <vscale x 2 x half> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
     <vscale x 2 x half> %1,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
     <vscale x 4 x half> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
     <vscale x 4 x half> %1,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
     <vscale x 8 x half> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
     <vscale x 8 x half> %1,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
     <vscale x 16 x half> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16(
     <vscale x 16 x half> %1,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
     <vscale x 32 x half> %0,
@@ -228,7 +228,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
     <vscale x 1 x float> %0,
@@ -248,7 +248,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32(
     <vscale x 1 x float> %1,
@@ -268,7 +268,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
     <vscale x 2 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
     <vscale x 2 x float> %1,
@@ -308,7 +308,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
     <vscale x 4 x float> %0,
@@ -328,7 +328,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
     <vscale x 4 x float> %1,
@@ -348,7 +348,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
     <vscale x 8 x float> %0,
@@ -368,7 +368,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
     <vscale x 8 x float> %1,
@@ -388,7 +388,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
     <vscale x 16 x float> %0,
@@ -406,7 +406,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
     <vscale x 1 x double> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64(
     <vscale x 1 x double> %1,
@@ -446,7 +446,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
     <vscale x 2 x double> %0,
@@ -466,7 +466,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
     <vscale x 2 x double> %1,
@@ -486,7 +486,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
     <vscale x 4 x double> %0,
@@ -506,7 +506,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
     <vscale x 4 x double> %1,
@@ -526,7 +526,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
index 454a1e5..04d411e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -32,7 +32,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x half> %0,
   <vscale x 1 x half> %1,
   <vscale x 1 x i1> %2,
@@ -56,7 +56,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -78,7 +78,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x half> %0,
   <vscale x 2 x half> %1,
   <vscale x 2 x i1> %2,
@@ -102,7 +102,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x half> %0,
   <vscale x 4 x half> %1,
   <vscale x 4 x i1> %2,
@@ -148,7 +148,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x half> %0,
   <vscale x 8 x half> %1,
   <vscale x 8 x i1> %2,
@@ -194,7 +194,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -216,7 +216,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x half> %0,
   <vscale x 16 x half> %1,
   <vscale x 16 x i1> %2,
@@ -240,7 +240,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 32 x half> %0,
   i64 %1) nounwind {
 entry:
@@ -262,7 +262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 32 x half> %0,
   <vscale x 32 x half> %1,
   <vscale x 32 x i1> %2,
@@ -286,7 +286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -308,7 +308,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x float> %0,
   <vscale x 1 x float> %1,
   <vscale x 1 x i1> %2,
@@ -332,7 +332,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -354,7 +354,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x float> %0,
   <vscale x 2 x float> %1,
   <vscale x 2 x i1> %2,
@@ -378,7 +378,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x float> %0,
   <vscale x 4 x float> %1,
   <vscale x 4 x i1> %2,
@@ -424,7 +424,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -446,7 +446,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x float> %0,
   <vscale x 8 x float> %1,
   <vscale x 8 x i1> %2,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x float> %0,
   i64 %1) nounwind {
 entry:
@@ -492,7 +492,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 16 x float> %0,
   <vscale x 16 x float> %1,
   <vscale x 16 x i1> %2,
@@ -516,7 +516,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -538,7 +538,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 1 x double> %0,
   <vscale x 1 x double> %1,
   <vscale x 1 x i1> %2,
@@ -562,7 +562,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -584,7 +584,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 2 x double> %0,
   <vscale x 2 x double> %1,
   <vscale x 2 x i1> %2,
@@ -608,7 +608,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -630,7 +630,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 4 x double> %0,
   <vscale x 4 x double> %1,
   <vscale x 4 x i1> %2,
@@ -654,7 +654,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x double> %0,
   i64 %1) nounwind {
 entry:
@@ -676,7 +676,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsqrt.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
   <vscale x 8 x double> %0,
   <vscale x 8 x double> %1,
   <vscale x 8 x i1> %2,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
index 7aa38c0..dc139f8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -254,7 +254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -497,7 +497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -519,7 +519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -563,7 +563,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -652,7 +652,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -675,7 +675,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -698,7 +698,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -721,7 +721,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -744,7 +744,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -767,7 +767,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -813,7 +813,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -836,7 +836,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -859,7 +859,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -882,7 +882,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -951,7 +951,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1089,7 +1089,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1112,7 +1112,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1212,7 +1212,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1266,7 +1266,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v10, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1293,7 +1293,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v12, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1347,7 +1347,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1374,7 +1374,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v16, ft0, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
index ee66082..1d11549 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \
 ; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -56,7 +56,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -78,7 +78,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -122,7 +122,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -144,7 +144,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -166,7 +166,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -188,7 +188,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -232,7 +232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -255,7 +255,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
     <vscale x 32 x half> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -299,7 +299,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -321,7 +321,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -343,7 +343,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -365,7 +365,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -387,7 +387,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -409,7 +409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -431,7 +431,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -476,7 +476,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
     <vscale x 16 x float> %0,
@@ -498,7 +498,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -520,7 +520,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
@@ -542,7 +542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64(
     <vscale x 2 x double> %0,
@@ -586,7 +586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -608,7 +608,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64(
     <vscale x 4 x double> %0,
@@ -630,7 +630,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -653,7 +653,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64(
     <vscale x 8 x double> %0,
@@ -676,7 +676,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -699,7 +699,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -722,7 +722,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -745,7 +745,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -768,7 +768,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -791,7 +791,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -814,7 +814,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -837,7 +837,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -860,7 +860,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -883,7 +883,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -906,7 +906,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -929,7 +929,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
     <vscale x 32 x half> %0,
@@ -952,7 +952,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -975,7 +975,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -998,7 +998,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1021,7 +1021,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -1044,7 +1044,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1067,7 +1067,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1090,7 +1090,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1136,7 +1136,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
     <vscale x 16 x float> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1205,7 +1205,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1228,7 +1228,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1251,7 +1251,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1274,7 +1274,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1297,7 +1297,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1320,7 +1320,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vfsub.vf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
     <vscale x 8 x double> %0,
@@ -1343,7 +1343,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vfsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
index 1fda439..2421163 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x double> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x double> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x double> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x double> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -441,7 +441,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -488,7 +488,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -512,7 +512,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -535,7 +535,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -559,7 +559,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -582,7 +582,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -606,7 +606,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -629,7 +629,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
@@ -653,7 +653,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -676,7 +676,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32(
     <vscale x 1 x double> %0,
@@ -700,7 +700,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -723,7 +723,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32(
     <vscale x 2 x double> %0,
@@ -747,7 +747,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -770,7 +770,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32(
     <vscale x 4 x double> %0,
@@ -794,7 +794,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
index ecebe7a..9ce400d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x double> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x double> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x double> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x double> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -441,7 +441,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -488,7 +488,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -512,7 +512,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -535,7 +535,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -559,7 +559,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -582,7 +582,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -606,7 +606,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -629,7 +629,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
@@ -653,7 +653,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -676,7 +676,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32(
     <vscale x 1 x double> %0,
@@ -700,7 +700,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -723,7 +723,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32(
     <vscale x 2 x double> %0,
@@ -747,7 +747,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -770,7 +770,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32(
     <vscale x 4 x double> %0,
@@ -794,7 +794,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
index c6283ba..fc0322f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -215,7 +215,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -238,7 +238,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -260,7 +260,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -283,7 +283,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -328,7 +328,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -396,7 +396,7 @@
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -419,7 +419,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -442,7 +442,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -488,7 +488,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -511,7 +511,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -534,7 +534,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -580,7 +580,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -603,7 +603,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -626,7 +626,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -649,7 +649,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -672,7 +672,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -695,7 +695,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -718,7 +718,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -741,7 +741,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -764,7 +764,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -810,7 +810,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
index 425dc37..3272e4e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -215,7 +215,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -238,7 +238,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -260,7 +260,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -283,7 +283,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -328,7 +328,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -396,7 +396,7 @@
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -419,7 +419,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -442,7 +442,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -488,7 +488,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -511,7 +511,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -534,7 +534,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -580,7 +580,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -603,7 +603,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -626,7 +626,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -649,7 +649,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -672,7 +672,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -695,7 +695,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -718,7 +718,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -741,7 +741,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -764,7 +764,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -810,7 +810,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwadd.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll
index 45b8316..6246970 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll
index 3c5a037..fbe5d42 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll
index ee27b4b..19ccffd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
   <vscale x 1 x i8>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll
index dc4a30f..1f4b049 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
   <vscale x 1 x i8>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.x.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll
index c03fe32..6a51765 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
   <vscale x 1 x i8>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll
index 13d62b66..3a09349 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
   <vscale x 1 x i8>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll
index decb30b..980f8d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll
index 07341b0..315bcc0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll
index 618b8e9..0b70bc3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll
index 0538918..fff4917 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll
index 95ef4469..87c8f8e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll
index d214671..de7eb29 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll
index 4439265..7eb7877 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll
index 4b7bd24..6920b27 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
   <vscale x 1 x half>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
     <vscale x 1 x half> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
     <vscale x 1 x i32> %0,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
     <vscale x 2 x half> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16(
     <vscale x 2 x i32> %0,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
     <vscale x 4 x half> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16(
     <vscale x 4 x i32> %0,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
     <vscale x 8 x half> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16(
     <vscale x 8 x i32> %0,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
     <vscale x 16 x half> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16(
     <vscale x 16 x i32> %0,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
     <vscale x 1 x float> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32(
     <vscale x 1 x i64> %0,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
     <vscale x 2 x float> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32(
     <vscale x 2 x i64> %0,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
     <vscale x 4 x float> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32(
     <vscale x 4 x i64> %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
     <vscale x 8 x float> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwcvt.xu.f.v v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
index a643431..fe38144 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
index 367ebdc..aea0274 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
index 7c04a9c..2892226 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll
index acc2541..92c404d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
index cf42d05..5c54927 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x double> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x double> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x double> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x double> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -441,7 +441,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -488,7 +488,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -512,7 +512,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -535,7 +535,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -559,7 +559,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -582,7 +582,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -606,7 +606,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -629,7 +629,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
@@ -653,7 +653,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -676,7 +676,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32(
     <vscale x 1 x double> %0,
@@ -700,7 +700,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -723,7 +723,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32(
     <vscale x 2 x double> %0,
@@ -747,7 +747,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -770,7 +770,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32(
     <vscale x 4 x double> %0,
@@ -794,7 +794,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
index 6392531..f69e29e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x double> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x double> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x double> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x double> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -441,7 +441,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -488,7 +488,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -512,7 +512,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -535,7 +535,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -559,7 +559,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -582,7 +582,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -606,7 +606,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -629,7 +629,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
@@ -653,7 +653,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -676,7 +676,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32(
     <vscale x 1 x double> %0,
@@ -700,7 +700,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -723,7 +723,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32(
     <vscale x 2 x double> %0,
@@ -747,7 +747,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -770,7 +770,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32(
     <vscale x 4 x double> %0,
@@ -794,7 +794,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwmul.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwmul.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
index 0ed393a..cc6a3d2b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll
index 50533c4..76e4f1e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmacc.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
index fc17078..46de3bf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll
index 5455d7e..38716d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -427,7 +427,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -451,7 +451,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -475,7 +475,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -499,7 +499,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -523,7 +523,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -547,7 +547,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -571,7 +571,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -595,7 +595,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -619,7 +619,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -643,7 +643,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -691,7 +691,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -715,7 +715,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -739,7 +739,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -763,7 +763,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -811,7 +811,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -835,7 +835,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwnmsac.vf v8, ft0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
index ffff9c7..2aa9471 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
   <vscale x 2 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
     <vscale x 2 x float> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv4f16(
     <vscale x 2 x float> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv8f16(
     <vscale x 2 x float> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv16f16(
     <vscale x 2 x float> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32(
     <vscale x 1 x double> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32(
     <vscale x 1 x double> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.nxv1f64(
     <vscale x 1 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
index 7941dc7..0f65959 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
   <vscale x 2 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
     <vscale x 2 x float> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv4f16(
     <vscale x 2 x float> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv8f16(
     <vscale x 2 x float> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv16f16(
     <vscale x 2 x float> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32(
     <vscale x 1 x double> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32(
     <vscale x 1 x double> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
index 203d0e5..744d6e1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
   <vscale x 2 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
     <vscale x 2 x float> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv4f16(
     <vscale x 2 x float> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv8f16(
     <vscale x 2 x float> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16(
     <vscale x 2 x float> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32(
     <vscale x 1 x double> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32(
     <vscale x 1 x double> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.nxv1f64(
     <vscale x 1 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
index 4a6b5a7..bdab478 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
   <vscale x 2 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
     <vscale x 2 x float> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv4f16(
     <vscale x 2 x float> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv8f16(
     <vscale x 2 x float> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16(
     <vscale x 2 x float> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.nxv2f32(
     <vscale x 2 x float> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32(
     <vscale x 1 x double> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32(
     <vscale x 1 x double> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32(
     <vscale x 1 x double> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.nxv1f64(
     <vscale x 1 x double> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
index b966461..4f96c8f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x double> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x double> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x double> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x double> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -441,7 +441,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -488,7 +488,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -512,7 +512,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -535,7 +535,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -559,7 +559,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -582,7 +582,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -606,7 +606,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -629,7 +629,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
@@ -653,7 +653,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -676,7 +676,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32(
     <vscale x 1 x double> %0,
@@ -700,7 +700,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -723,7 +723,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32(
     <vscale x 2 x double> %0,
@@ -747,7 +747,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -770,7 +770,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32(
     <vscale x 4 x double> %0,
@@ -794,7 +794,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
index ddf5652..81dfb7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x double> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x double> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x double> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x double> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -441,7 +441,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -488,7 +488,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
@@ -512,7 +512,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -535,7 +535,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
@@ -559,7 +559,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -582,7 +582,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
@@ -606,7 +606,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -629,7 +629,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
@@ -653,7 +653,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v25, v8, ft0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -676,7 +676,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32(
     <vscale x 1 x double> %0,
@@ -700,7 +700,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v26, v8, ft0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -723,7 +723,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32(
     <vscale x 2 x double> %0,
@@ -747,7 +747,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v28, v8, ft0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -770,7 +770,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32(
     <vscale x 4 x double> %0,
@@ -794,7 +794,7 @@
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.vf v16, v8, ft0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.vf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
index 4d00c88..48b76bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -215,7 +215,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -238,7 +238,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -260,7 +260,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -283,7 +283,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -328,7 +328,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -396,7 +396,7 @@
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -419,7 +419,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -442,7 +442,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -488,7 +488,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -511,7 +511,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -534,7 +534,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -580,7 +580,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -603,7 +603,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -626,7 +626,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -649,7 +649,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -672,7 +672,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -695,7 +695,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -718,7 +718,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -741,7 +741,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -764,7 +764,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -810,7 +810,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
index 2718893..b97ee74 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -215,7 +215,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
@@ -238,7 +238,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -260,7 +260,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
@@ -283,7 +283,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
@@ -328,7 +328,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -396,7 +396,7 @@
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
@@ -419,7 +419,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -442,7 +442,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
     <vscale x 1 x float> %0,
@@ -465,7 +465,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -488,7 +488,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
     <vscale x 2 x float> %0,
@@ -511,7 +511,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -534,7 +534,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
     <vscale x 4 x float> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -580,7 +580,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
     <vscale x 8 x float> %0,
@@ -603,7 +603,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -626,7 +626,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
     <vscale x 16 x float> %0,
@@ -649,7 +649,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -672,7 +672,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v9, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
     <vscale x 1 x double> %0,
@@ -695,7 +695,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -718,7 +718,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v10, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
     <vscale x 2 x double> %0,
@@ -741,7 +741,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -764,7 +764,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v12, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
     <vscale x 4 x double> %0,
@@ -787,7 +787,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
     <vscale x 8 x double> %0,
@@ -810,7 +810,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vfwsub.wf v8, v16, ft0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
index 99f773d..fc34722 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
   i32);
 
@@ -9,7 +9,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
     i32 %0)
@@ -27,7 +27,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -45,7 +45,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
     i32 %0)
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
     i32 %0)
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -117,7 +117,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
     i32 %0)
@@ -135,7 +135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -153,7 +153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
     i32 %0)
@@ -171,7 +171,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -189,7 +189,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
     i32 %0)
@@ -207,7 +207,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -225,7 +225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
     i32 %0)
@@ -243,7 +243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -261,7 +261,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
     i32 %0)
@@ -279,7 +279,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -297,7 +297,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
     i32 %0)
@@ -315,7 +315,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -333,7 +333,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
     i32 %0)
@@ -351,7 +351,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -369,7 +369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
     i32 %0)
@@ -387,7 +387,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -405,7 +405,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
     i32 %0)
@@ -423,7 +423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
     i32 %0)
@@ -459,7 +459,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -477,7 +477,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
     i32 %0)
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -513,7 +513,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
     i32 %0)
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -549,7 +549,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
     i32 %0)
@@ -567,7 +567,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
     i32 %0)
@@ -603,7 +603,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
     i32 %0)
@@ -639,7 +639,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -657,7 +657,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
     i32 %0)
@@ -675,7 +675,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -693,7 +693,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
     i32 %0)
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -729,7 +729,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
     i32 %0)
@@ -747,7 +747,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
index 10f060b..8285713 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
   i64);
 
@@ -9,7 +9,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
     i64 %0)
@@ -27,7 +27,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -45,7 +45,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
     i64 %0)
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
     i64 %0)
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -117,7 +117,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
     i64 %0)
@@ -135,7 +135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -153,7 +153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
     i64 %0)
@@ -171,7 +171,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -189,7 +189,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
     i64 %0)
@@ -207,7 +207,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -225,7 +225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
     i64 %0)
@@ -243,7 +243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -261,7 +261,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
     i64 %0)
@@ -279,7 +279,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -297,7 +297,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
     i64 %0)
@@ -315,7 +315,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -333,7 +333,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
     i64 %0)
@@ -351,7 +351,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -369,7 +369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
     i64 %0)
@@ -387,7 +387,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -405,7 +405,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
     i64 %0)
@@ -423,7 +423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
     i64 %0)
@@ -459,7 +459,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -477,7 +477,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
     i64 %0)
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -513,7 +513,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
     i64 %0)
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -549,7 +549,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
     i64 %0)
@@ -567,7 +567,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
     i64 %0)
@@ -603,7 +603,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
     i64 %0)
@@ -639,7 +639,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -657,7 +657,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
     i64 %0)
@@ -675,7 +675,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -693,7 +693,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
     i64 %0)
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -729,7 +729,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
     i64 %0)
@@ -747,7 +747,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vid.v v8, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
index 109bd39..de22172 100644
--- a/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
   <vscale x 1 x i1>,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
     <vscale x 1 x i1> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
     <vscale x 2 x i1> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
     <vscale x 4 x i1> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
     <vscale x 8 x i1> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
     <vscale x 16 x i1> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
     <vscale x 32 x i1> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
     <vscale x 64 x i1> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
     <vscale x 1 x i1> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
     <vscale x 2 x i1> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
     <vscale x 4 x i1> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
     <vscale x 8 x i1> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
     <vscale x 16 x i1> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
     <vscale x 32 x i1> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
     <vscale x 1 x i1> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
     <vscale x 2 x i1> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
     <vscale x 4 x i1> %0,
@@ -630,7 +630,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -650,7 +650,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
     <vscale x 8 x i1> %0,
@@ -670,7 +670,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -690,7 +690,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
     <vscale x 16 x i1> %0,
@@ -710,7 +710,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -730,7 +730,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
     <vscale x 1 x i1> %0,
@@ -750,7 +750,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -770,7 +770,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
     <vscale x 2 x i1> %0,
@@ -790,7 +790,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -810,7 +810,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
     <vscale x 4 x i1> %0,
@@ -830,7 +830,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
     <vscale x 8 x i1> %0,
@@ -870,7 +870,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
index 0f97ce3..c199a744 100644
--- a/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
   <vscale x 1 x i1>,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
     <vscale x 1 x i1> %0,
@@ -30,7 +30,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
     <vscale x 2 x i1> %0,
@@ -70,7 +70,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
     <vscale x 4 x i1> %0,
@@ -110,7 +110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
     <vscale x 8 x i1> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
     <vscale x 16 x i1> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
     <vscale x 32 x i1> %0,
@@ -230,7 +230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
     <vscale x 64 x i1> %0,
@@ -270,7 +270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -290,7 +290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
     <vscale x 1 x i1> %0,
@@ -310,7 +310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -330,7 +330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
     <vscale x 2 x i1> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
     <vscale x 4 x i1> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -410,7 +410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
     <vscale x 8 x i1> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
     <vscale x 16 x i1> %0,
@@ -470,7 +470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -490,7 +490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
     <vscale x 32 x i1> %0,
@@ -510,7 +510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
     <vscale x 1 x i1> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
     <vscale x 2 x i1> %0,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
     <vscale x 4 x i1> %0,
@@ -630,7 +630,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -650,7 +650,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
     <vscale x 8 x i1> %0,
@@ -670,7 +670,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -690,7 +690,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
     <vscale x 16 x i1> %0,
@@ -710,7 +710,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -730,7 +730,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
     <vscale x 1 x i1> %0,
@@ -750,7 +750,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -770,7 +770,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
     <vscale x 2 x i1> %0,
@@ -790,7 +790,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -810,7 +810,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
     <vscale x 4 x i1> %0,
@@ -830,7 +830,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    viota.m v8, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
     <vscale x 8 x i1> %0,
@@ -870,7 +870,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    viota.m v8, v0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
index 4e4d781..6be4e41 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \
 ; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
   <vscale x 1 x i64>*,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -151,7 +151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -171,7 +171,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
     <vscale x 1 x double>* %0,
@@ -191,7 +191,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -211,7 +211,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
     <vscale x 2 x double>* %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -251,7 +251,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
     <vscale x 4 x double>* %0,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -291,7 +291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
     <vscale x 8 x double>* %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -331,7 +331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -351,7 +351,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -371,7 +371,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -391,7 +391,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -411,7 +411,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -431,7 +431,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -451,7 +451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -471,7 +471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -491,7 +491,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -511,7 +511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
     <vscale x 1 x float>* %0,
@@ -551,7 +551,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -571,7 +571,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
     <vscale x 2 x float>* %0,
@@ -591,7 +591,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -611,7 +611,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
     <vscale x 4 x float>* %0,
@@ -631,7 +631,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
     <vscale x 8 x float>* %0,
@@ -671,7 +671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -691,7 +691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
     <vscale x 16 x float>* %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -731,7 +731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -751,7 +751,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -811,7 +811,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -831,7 +831,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -851,7 +851,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -871,7 +871,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -891,7 +891,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -911,7 +911,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -931,7 +931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -951,7 +951,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -971,7 +971,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
     <vscale x 1 x half>* %0,
@@ -991,7 +991,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1011,7 +1011,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
     <vscale x 2 x half>* %0,
@@ -1031,7 +1031,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1051,7 +1051,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
     <vscale x 4 x half>* %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1091,7 +1091,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
     <vscale x 8 x half>* %0,
@@ -1111,7 +1111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1131,7 +1131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
     <vscale x 16 x half>* %0,
@@ -1151,7 +1151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1171,7 +1171,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
     <vscale x 32 x half>* %0,
@@ -1191,7 +1191,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1211,7 +1211,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1251,7 +1251,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -1271,7 +1271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -1311,7 +1311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1331,7 +1331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -1351,7 +1351,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1371,7 +1371,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -1391,7 +1391,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1411,7 +1411,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -1431,7 +1431,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1451,7 +1451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -1471,7 +1471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
     <vscale x 64 x i8> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
index abf3694..87ef0d9c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \
 ; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
   <vscale x 1 x i64>*,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -151,7 +151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -171,7 +171,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
     <vscale x 1 x double>* %0,
@@ -191,7 +191,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -211,7 +211,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
     <vscale x 2 x double>* %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -251,7 +251,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
     <vscale x 4 x double>* %0,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -291,7 +291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
     <vscale x 8 x double>* %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -331,7 +331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -351,7 +351,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -371,7 +371,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -391,7 +391,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -411,7 +411,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -431,7 +431,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -451,7 +451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -471,7 +471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -491,7 +491,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -511,7 +511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
     <vscale x 1 x float>* %0,
@@ -551,7 +551,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -571,7 +571,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
     <vscale x 2 x float>* %0,
@@ -591,7 +591,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -611,7 +611,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
     <vscale x 4 x float>* %0,
@@ -631,7 +631,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
     <vscale x 8 x float>* %0,
@@ -671,7 +671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -691,7 +691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
     <vscale x 16 x float>* %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -731,7 +731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -751,7 +751,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -811,7 +811,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -831,7 +831,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -851,7 +851,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -871,7 +871,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -891,7 +891,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -911,7 +911,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -931,7 +931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -951,7 +951,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -971,7 +971,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
     <vscale x 1 x half>* %0,
@@ -991,7 +991,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1011,7 +1011,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
     <vscale x 2 x half>* %0,
@@ -1031,7 +1031,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1051,7 +1051,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
     <vscale x 4 x half>* %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1091,7 +1091,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
     <vscale x 8 x half>* %0,
@@ -1111,7 +1111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1131,7 +1131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
     <vscale x 16 x half>* %0,
@@ -1151,7 +1151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1171,7 +1171,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
     <vscale x 32 x half>* %0,
@@ -1191,7 +1191,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1211,7 +1211,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1251,7 +1251,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -1271,7 +1271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -1311,7 +1311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1331,7 +1331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -1351,7 +1351,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1371,7 +1371,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -1391,7 +1391,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1411,7 +1411,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -1431,7 +1431,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1451,7 +1451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -1471,7 +1471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
     <vscale x 64 x i8> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vle1-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vle1-rv32.ll
index f7040f7..7688c4b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vle1-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vle1-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 
 declare <vscale x 1 x i1> @llvm.riscv.vle1.nxv1i1(<vscale x 1 x i1>*, i32);
 
@@ -9,7 +9,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vle1.nxv1i1(<vscale x 1 x i1>* %0, i32 %1)
   ret <vscale x 1 x i1> %a
@@ -22,7 +22,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vle1.nxv2i1(<vscale x 2 x i1>* %0, i32 %1)
   ret <vscale x 2 x i1> %a
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vle1.nxv4i1(<vscale x 4 x i1>* %0, i32 %1)
   ret <vscale x 4 x i1> %a
@@ -48,7 +48,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vle1.nxv8i1(<vscale x 8 x i1>* %0, i32 %1)
   ret <vscale x 8 x i1> %a
@@ -61,7 +61,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vle1.nxv16i1(<vscale x 16 x i1>* %0, i32 %1)
   ret <vscale x 16 x i1> %a
@@ -74,7 +74,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vle1.nxv32i1(<vscale x 32 x i1>* %0, i32 %1)
   ret <vscale x 32 x i1> %a
@@ -87,7 +87,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vle1.nxv64i1(<vscale x 64 x i1>* %0, i32 %1)
   ret <vscale x 64 x i1> %a
diff --git a/llvm/test/CodeGen/RISCV/rvv/vle1-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vle1-rv64.ll
index 46c91f5..bc9f7df 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vle1-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vle1-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 
 declare <vscale x 1 x i1> @llvm.riscv.vle1.nxv1i1(<vscale x 1 x i1>*, i64);
 
@@ -9,7 +9,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vle1.nxv1i1(<vscale x 1 x i1>* %0, i64 %1)
   ret <vscale x 1 x i1> %a
@@ -22,7 +22,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vle1.nxv2i1(<vscale x 2 x i1>* %0, i64 %1)
   ret <vscale x 2 x i1> %a
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vle1.nxv4i1(<vscale x 4 x i1>* %0, i64 %1)
   ret <vscale x 4 x i1> %a
@@ -48,7 +48,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vle1.nxv8i1(<vscale x 8 x i1>* %0, i64 %1)
   ret <vscale x 8 x i1> %a
@@ -61,7 +61,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vle1.nxv16i1(<vscale x 16 x i1>* %0, i64 %1)
   ret <vscale x 16 x i1> %a
@@ -74,7 +74,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vle1.nxv32i1(<vscale x 32 x i1>* %0, i64 %1)
   ret <vscale x 32 x i1> %a
@@ -87,7 +87,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vle1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vle1.nxv64i1(<vscale x 64 x i1>* %0, i64 %1)
   ret <vscale x 64 x i1> %a
diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
index 485e363..9feafc4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>*,
   <vscale x 1 x i64>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8>* %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8>* %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8>* %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8>* %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16>* %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16>* %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16>* %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16>* %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32>* %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32>* %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32>* %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32>* %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -551,7 +551,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -573,7 +573,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -595,7 +595,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -617,7 +617,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -639,7 +639,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -661,7 +661,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -683,7 +683,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -705,7 +705,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -728,7 +728,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
     <vscale x 1 x half>* %0,
@@ -750,7 +750,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -773,7 +773,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
     <vscale x 2 x half>* %0,
@@ -795,7 +795,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -818,7 +818,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
     <vscale x 4 x half>* %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
     <vscale x 8 x half>* %0,
@@ -885,7 +885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -908,7 +908,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
     <vscale x 1 x float>* %0,
@@ -930,7 +930,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -953,7 +953,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
     <vscale x 2 x float>* %0,
@@ -975,7 +975,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -998,7 +998,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
     <vscale x 4 x float>* %0,
@@ -1020,7 +1020,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
     <vscale x 8 x float>* %0,
@@ -1065,7 +1065,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1087,7 +1087,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
     <vscale x 1 x double>* %0,
@@ -1109,7 +1109,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1131,7 +1131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
     <vscale x 2 x double>* %0,
@@ -1153,7 +1153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1175,7 +1175,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
     <vscale x 4 x double>* %0,
@@ -1197,7 +1197,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1219,7 +1219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
     <vscale x 8 x double>* %0,
@@ -1241,7 +1241,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1264,7 +1264,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8>* %0,
@@ -1286,7 +1286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8>* %0,
@@ -1331,7 +1331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1354,7 +1354,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8>* %0,
@@ -1376,7 +1376,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1399,7 +1399,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8>* %0,
@@ -1421,7 +1421,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1444,7 +1444,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8>* %0,
@@ -1466,7 +1466,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16>* %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1534,7 +1534,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16>* %0,
@@ -1556,7 +1556,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1579,7 +1579,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16>* %0,
@@ -1601,7 +1601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1624,7 +1624,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16>* %0,
@@ -1646,7 +1646,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16>* %0,
@@ -1691,7 +1691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1713,7 +1713,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -1735,7 +1735,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1757,7 +1757,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -1779,7 +1779,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1801,7 +1801,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -1823,7 +1823,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1845,7 +1845,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -1867,7 +1867,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1889,7 +1889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -1911,7 +1911,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1934,7 +1934,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64>* %0,
@@ -1956,7 +1956,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -1979,7 +1979,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64>* %0,
@@ -2001,7 +2001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2024,7 +2024,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64>* %0,
@@ -2046,7 +2046,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2069,7 +2069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64>* %0,
@@ -2091,7 +2091,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2114,7 +2114,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
     <vscale x 1 x half>* %0,
@@ -2136,7 +2136,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2159,7 +2159,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
     <vscale x 2 x half>* %0,
@@ -2181,7 +2181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2204,7 +2204,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
     <vscale x 4 x half>* %0,
@@ -2226,7 +2226,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
     <vscale x 8 x half>* %0,
@@ -2271,7 +2271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2294,7 +2294,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
     <vscale x 16 x half>* %0,
@@ -2316,7 +2316,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2338,7 +2338,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
     <vscale x 1 x float>* %0,
@@ -2360,7 +2360,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2382,7 +2382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
     <vscale x 2 x float>* %0,
@@ -2404,7 +2404,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
     <vscale x 4 x float>* %0,
@@ -2448,7 +2448,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2470,7 +2470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
     <vscale x 8 x float>* %0,
@@ -2492,7 +2492,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2514,7 +2514,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
     <vscale x 16 x float>* %0,
@@ -2536,7 +2536,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2559,7 +2559,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
     <vscale x 1 x double>* %0,
@@ -2581,7 +2581,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2604,7 +2604,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
     <vscale x 2 x double>* %0,
@@ -2626,7 +2626,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2649,7 +2649,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
     <vscale x 4 x double>* %0,
@@ -2671,7 +2671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2694,7 +2694,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
     <vscale x 8 x double>* %0,
@@ -2716,7 +2716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2739,7 +2739,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8>* %0,
@@ -2761,7 +2761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2784,7 +2784,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8>* %0,
@@ -2806,7 +2806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2829,7 +2829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8>* %0,
@@ -2851,7 +2851,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2874,7 +2874,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8>* %0,
@@ -2896,7 +2896,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2919,7 +2919,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8>* %0,
@@ -2941,7 +2941,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -2964,7 +2964,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8>* %0,
@@ -2986,7 +2986,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3008,7 +3008,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -3030,7 +3030,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3052,7 +3052,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -3074,7 +3074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3096,7 +3096,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -3118,7 +3118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3140,7 +3140,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -3162,7 +3162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3184,7 +3184,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -3206,7 +3206,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3228,7 +3228,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -3250,7 +3250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3273,7 +3273,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32>* %0,
@@ -3295,7 +3295,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3318,7 +3318,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32>* %0,
@@ -3340,7 +3340,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3363,7 +3363,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32>* %0,
@@ -3385,7 +3385,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3408,7 +3408,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32>* %0,
@@ -3430,7 +3430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3453,7 +3453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32>* %0,
@@ -3475,7 +3475,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3498,7 +3498,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64>* %0,
@@ -3520,7 +3520,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3543,7 +3543,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64>* %0,
@@ -3565,7 +3565,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3588,7 +3588,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64>* %0,
@@ -3610,7 +3610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3633,7 +3633,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64>* %0,
@@ -3655,7 +3655,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3677,7 +3677,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
     <vscale x 1 x half>* %0,
@@ -3699,7 +3699,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3721,7 +3721,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
     <vscale x 2 x half>* %0,
@@ -3743,7 +3743,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3765,7 +3765,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
     <vscale x 4 x half>* %0,
@@ -3787,7 +3787,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3809,7 +3809,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
     <vscale x 8 x half>* %0,
@@ -3831,7 +3831,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3853,7 +3853,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
     <vscale x 16 x half>* %0,
@@ -3875,7 +3875,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3897,7 +3897,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
     <vscale x 32 x half>* %0,
@@ -3919,7 +3919,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -3942,7 +3942,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
     <vscale x 1 x float>* %0,
@@ -3964,7 +3964,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -3987,7 +3987,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
     <vscale x 2 x float>* %0,
@@ -4009,7 +4009,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4032,7 +4032,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
     <vscale x 4 x float>* %0,
@@ -4054,7 +4054,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4077,7 +4077,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
     <vscale x 8 x float>* %0,
@@ -4099,7 +4099,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4122,7 +4122,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
     <vscale x 16 x float>* %0,
@@ -4144,7 +4144,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4167,7 +4167,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
     <vscale x 1 x double>* %0,
@@ -4189,7 +4189,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4212,7 +4212,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
     <vscale x 2 x double>* %0,
@@ -4234,7 +4234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4257,7 +4257,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
     <vscale x 4 x double>* %0,
@@ -4279,7 +4279,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4302,7 +4302,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
     <vscale x 8 x double>* %0,
@@ -4324,7 +4324,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4346,7 +4346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -4368,7 +4368,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4390,7 +4390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -4412,7 +4412,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4434,7 +4434,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -4456,7 +4456,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4478,7 +4478,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -4500,7 +4500,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4522,7 +4522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -4544,7 +4544,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4566,7 +4566,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -4588,7 +4588,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4610,7 +4610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -4632,7 +4632,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4655,7 +4655,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16>* %0,
@@ -4677,7 +4677,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4700,7 +4700,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16>* %0,
@@ -4722,7 +4722,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4745,7 +4745,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16>* %0,
@@ -4767,7 +4767,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4790,7 +4790,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16>* %0,
@@ -4812,7 +4812,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4835,7 +4835,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16>* %0,
@@ -4857,7 +4857,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -4880,7 +4880,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16>* %0,
@@ -4902,7 +4902,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -4925,7 +4925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32>* %0,
@@ -4947,7 +4947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -4970,7 +4970,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32>* %0,
@@ -4992,7 +4992,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5015,7 +5015,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32>* %0,
@@ -5037,7 +5037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5060,7 +5060,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32>* %0,
@@ -5082,7 +5082,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5105,7 +5105,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32>* %0,
@@ -5127,7 +5127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5150,7 +5150,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64>* %0,
@@ -5172,7 +5172,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5195,7 +5195,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64>* %0,
@@ -5217,7 +5217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5240,7 +5240,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64>* %0,
@@ -5262,7 +5262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5285,7 +5285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64>* %0,
@@ -5307,7 +5307,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5330,7 +5330,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
     <vscale x 1 x half>* %0,
@@ -5352,7 +5352,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5375,7 +5375,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
     <vscale x 2 x half>* %0,
@@ -5397,7 +5397,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5420,7 +5420,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
     <vscale x 4 x half>* %0,
@@ -5442,7 +5442,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5465,7 +5465,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
     <vscale x 8 x half>* %0,
@@ -5487,7 +5487,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5510,7 +5510,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
     <vscale x 16 x half>* %0,
@@ -5532,7 +5532,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5555,7 +5555,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
     <vscale x 32 x half>* %0,
@@ -5577,7 +5577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5600,7 +5600,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
     <vscale x 1 x float>* %0,
@@ -5622,7 +5622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5645,7 +5645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
     <vscale x 2 x float>* %0,
@@ -5667,7 +5667,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5690,7 +5690,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
     <vscale x 4 x float>* %0,
@@ -5712,7 +5712,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5735,7 +5735,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
     <vscale x 8 x float>* %0,
@@ -5757,7 +5757,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5780,7 +5780,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
     <vscale x 16 x float>* %0,
@@ -5802,7 +5802,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5825,7 +5825,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
     <vscale x 1 x double>* %0,
@@ -5847,7 +5847,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -5870,7 +5870,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
     <vscale x 2 x double>* %0,
@@ -5892,7 +5892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -5915,7 +5915,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
     <vscale x 4 x double>* %0,
@@ -5937,7 +5937,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -5960,7 +5960,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
     <vscale x 8 x double>* %0,
@@ -5982,7 +5982,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
index 05204e6..c583732 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>*,
   <vscale x 1 x i64>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8>* %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8>* %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8>* %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8>* %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16>* %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16>* %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16>* %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16>* %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32>* %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32>* %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32>* %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32>* %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -551,7 +551,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -573,7 +573,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -595,7 +595,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -617,7 +617,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -639,7 +639,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -661,7 +661,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -683,7 +683,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -705,7 +705,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -728,7 +728,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
     <vscale x 1 x half>* %0,
@@ -750,7 +750,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -773,7 +773,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
     <vscale x 2 x half>* %0,
@@ -795,7 +795,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -818,7 +818,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
     <vscale x 4 x half>* %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
     <vscale x 8 x half>* %0,
@@ -885,7 +885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -908,7 +908,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
     <vscale x 1 x float>* %0,
@@ -930,7 +930,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -953,7 +953,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
     <vscale x 2 x float>* %0,
@@ -975,7 +975,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -998,7 +998,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
     <vscale x 4 x float>* %0,
@@ -1020,7 +1020,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
     <vscale x 8 x float>* %0,
@@ -1065,7 +1065,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1087,7 +1087,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
     <vscale x 1 x double>* %0,
@@ -1109,7 +1109,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1131,7 +1131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
     <vscale x 2 x double>* %0,
@@ -1153,7 +1153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1175,7 +1175,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
     <vscale x 4 x double>* %0,
@@ -1197,7 +1197,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1219,7 +1219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
     <vscale x 8 x double>* %0,
@@ -1241,7 +1241,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1264,7 +1264,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8>* %0,
@@ -1286,7 +1286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8>* %0,
@@ -1331,7 +1331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1354,7 +1354,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8>* %0,
@@ -1376,7 +1376,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1399,7 +1399,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8>* %0,
@@ -1421,7 +1421,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1444,7 +1444,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8>* %0,
@@ -1466,7 +1466,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16>* %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1534,7 +1534,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16>* %0,
@@ -1556,7 +1556,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1579,7 +1579,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16>* %0,
@@ -1601,7 +1601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1624,7 +1624,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16>* %0,
@@ -1646,7 +1646,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16>* %0,
@@ -1691,7 +1691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1713,7 +1713,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -1735,7 +1735,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1757,7 +1757,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -1779,7 +1779,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1801,7 +1801,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -1823,7 +1823,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1845,7 +1845,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -1867,7 +1867,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1889,7 +1889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -1911,7 +1911,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1934,7 +1934,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64>* %0,
@@ -1956,7 +1956,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -1979,7 +1979,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64>* %0,
@@ -2001,7 +2001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2024,7 +2024,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64>* %0,
@@ -2046,7 +2046,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2069,7 +2069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64>* %0,
@@ -2091,7 +2091,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2114,7 +2114,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
     <vscale x 1 x half>* %0,
@@ -2136,7 +2136,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2159,7 +2159,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
     <vscale x 2 x half>* %0,
@@ -2181,7 +2181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2204,7 +2204,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
     <vscale x 4 x half>* %0,
@@ -2226,7 +2226,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
     <vscale x 8 x half>* %0,
@@ -2271,7 +2271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2294,7 +2294,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
     <vscale x 16 x half>* %0,
@@ -2316,7 +2316,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2338,7 +2338,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
     <vscale x 1 x float>* %0,
@@ -2360,7 +2360,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2382,7 +2382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
     <vscale x 2 x float>* %0,
@@ -2404,7 +2404,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
     <vscale x 4 x float>* %0,
@@ -2448,7 +2448,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2470,7 +2470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
     <vscale x 8 x float>* %0,
@@ -2492,7 +2492,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2514,7 +2514,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
     <vscale x 16 x float>* %0,
@@ -2536,7 +2536,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2559,7 +2559,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
     <vscale x 1 x double>* %0,
@@ -2581,7 +2581,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2604,7 +2604,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
     <vscale x 2 x double>* %0,
@@ -2626,7 +2626,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2649,7 +2649,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
     <vscale x 4 x double>* %0,
@@ -2671,7 +2671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2694,7 +2694,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
     <vscale x 8 x double>* %0,
@@ -2716,7 +2716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2739,7 +2739,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8>* %0,
@@ -2761,7 +2761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2784,7 +2784,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8>* %0,
@@ -2806,7 +2806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2829,7 +2829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8>* %0,
@@ -2851,7 +2851,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2874,7 +2874,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8>* %0,
@@ -2896,7 +2896,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2919,7 +2919,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8>* %0,
@@ -2941,7 +2941,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -2964,7 +2964,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8>* %0,
@@ -2986,7 +2986,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3008,7 +3008,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -3030,7 +3030,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3052,7 +3052,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -3074,7 +3074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3096,7 +3096,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -3118,7 +3118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3140,7 +3140,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -3162,7 +3162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3184,7 +3184,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -3206,7 +3206,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3228,7 +3228,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -3250,7 +3250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3273,7 +3273,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32>* %0,
@@ -3295,7 +3295,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3318,7 +3318,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32>* %0,
@@ -3340,7 +3340,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3363,7 +3363,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32>* %0,
@@ -3385,7 +3385,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3408,7 +3408,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32>* %0,
@@ -3430,7 +3430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3453,7 +3453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32>* %0,
@@ -3475,7 +3475,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3498,7 +3498,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64>* %0,
@@ -3520,7 +3520,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3543,7 +3543,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64>* %0,
@@ -3565,7 +3565,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3588,7 +3588,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64>* %0,
@@ -3610,7 +3610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3633,7 +3633,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64>* %0,
@@ -3655,7 +3655,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3677,7 +3677,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
     <vscale x 1 x half>* %0,
@@ -3699,7 +3699,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3721,7 +3721,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
     <vscale x 2 x half>* %0,
@@ -3743,7 +3743,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3765,7 +3765,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
     <vscale x 4 x half>* %0,
@@ -3787,7 +3787,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3809,7 +3809,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
     <vscale x 8 x half>* %0,
@@ -3831,7 +3831,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3853,7 +3853,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
     <vscale x 16 x half>* %0,
@@ -3875,7 +3875,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3897,7 +3897,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
     <vscale x 32 x half>* %0,
@@ -3919,7 +3919,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -3942,7 +3942,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
     <vscale x 1 x float>* %0,
@@ -3964,7 +3964,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -3987,7 +3987,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
     <vscale x 2 x float>* %0,
@@ -4009,7 +4009,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4032,7 +4032,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
     <vscale x 4 x float>* %0,
@@ -4054,7 +4054,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4077,7 +4077,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
     <vscale x 8 x float>* %0,
@@ -4099,7 +4099,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4122,7 +4122,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
     <vscale x 16 x float>* %0,
@@ -4144,7 +4144,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4167,7 +4167,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
     <vscale x 1 x double>* %0,
@@ -4189,7 +4189,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4212,7 +4212,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
     <vscale x 2 x double>* %0,
@@ -4234,7 +4234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4257,7 +4257,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
     <vscale x 4 x double>* %0,
@@ -4279,7 +4279,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4302,7 +4302,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
     <vscale x 8 x double>* %0,
@@ -4324,7 +4324,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4346,7 +4346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -4368,7 +4368,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4390,7 +4390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -4412,7 +4412,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4434,7 +4434,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -4456,7 +4456,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4478,7 +4478,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -4500,7 +4500,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4522,7 +4522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -4544,7 +4544,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4566,7 +4566,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -4588,7 +4588,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4610,7 +4610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -4632,7 +4632,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4655,7 +4655,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16>* %0,
@@ -4677,7 +4677,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4700,7 +4700,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16>* %0,
@@ -4722,7 +4722,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4745,7 +4745,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16>* %0,
@@ -4767,7 +4767,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4790,7 +4790,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16>* %0,
@@ -4812,7 +4812,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4835,7 +4835,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16>* %0,
@@ -4857,7 +4857,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -4880,7 +4880,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16>* %0,
@@ -4902,7 +4902,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -4925,7 +4925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32>* %0,
@@ -4947,7 +4947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -4970,7 +4970,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32>* %0,
@@ -4992,7 +4992,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5015,7 +5015,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32>* %0,
@@ -5037,7 +5037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5060,7 +5060,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32>* %0,
@@ -5082,7 +5082,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5105,7 +5105,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32>* %0,
@@ -5127,7 +5127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5150,7 +5150,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64>* %0,
@@ -5172,7 +5172,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5195,7 +5195,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64>* %0,
@@ -5217,7 +5217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5240,7 +5240,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64>* %0,
@@ -5262,7 +5262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5285,7 +5285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64>* %0,
@@ -5307,7 +5307,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5330,7 +5330,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
     <vscale x 1 x half>* %0,
@@ -5352,7 +5352,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5375,7 +5375,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
     <vscale x 2 x half>* %0,
@@ -5397,7 +5397,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5420,7 +5420,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
     <vscale x 4 x half>* %0,
@@ -5442,7 +5442,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5465,7 +5465,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
     <vscale x 8 x half>* %0,
@@ -5487,7 +5487,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5510,7 +5510,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
     <vscale x 16 x half>* %0,
@@ -5532,7 +5532,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5555,7 +5555,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
     <vscale x 32 x half>* %0,
@@ -5577,7 +5577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5600,7 +5600,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
     <vscale x 1 x float>* %0,
@@ -5622,7 +5622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5645,7 +5645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
     <vscale x 2 x float>* %0,
@@ -5667,7 +5667,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5690,7 +5690,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
     <vscale x 4 x float>* %0,
@@ -5712,7 +5712,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5735,7 +5735,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
     <vscale x 8 x float>* %0,
@@ -5757,7 +5757,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5780,7 +5780,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
     <vscale x 16 x float>* %0,
@@ -5802,7 +5802,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5825,7 +5825,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vloxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
     <vscale x 1 x double>* %0,
@@ -5847,7 +5847,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -5870,7 +5870,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vloxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
     <vscale x 2 x double>* %0,
@@ -5892,7 +5892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -5915,7 +5915,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vloxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
     <vscale x 4 x double>* %0,
@@ -5937,7 +5937,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -5960,7 +5960,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vloxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
     <vscale x 8 x double>* %0,
@@ -5982,7 +5982,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
index dca50df..cd73c09 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
   <vscale x 1 x i64>*,
   i32,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
     <vscale x 1 x double>* %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
     <vscale x 2 x double>* %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
     <vscale x 4 x double>* %0,
@@ -297,7 +297,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -319,7 +319,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
     <vscale x 8 x double>* %0,
@@ -341,7 +341,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -363,7 +363,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -385,7 +385,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -407,7 +407,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -429,7 +429,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -451,7 +451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -473,7 +473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -517,7 +517,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -539,7 +539,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -561,7 +561,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
     <vscale x 1 x float>* %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -627,7 +627,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
     <vscale x 2 x float>* %0,
@@ -649,7 +649,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -671,7 +671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
     <vscale x 4 x float>* %0,
@@ -693,7 +693,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -715,7 +715,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
     <vscale x 8 x float>* %0,
@@ -737,7 +737,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -759,7 +759,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
     <vscale x 16 x float>* %0,
@@ -781,7 +781,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -803,7 +803,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -825,7 +825,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -847,7 +847,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -869,7 +869,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -891,7 +891,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -913,7 +913,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -935,7 +935,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -957,7 +957,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1023,7 +1023,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -1045,7 +1045,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1067,7 +1067,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
     <vscale x 1 x half>* %0,
@@ -1089,7 +1089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1111,7 +1111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
     <vscale x 2 x half>* %0,
@@ -1133,7 +1133,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1155,7 +1155,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
     <vscale x 4 x half>* %0,
@@ -1177,7 +1177,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1199,7 +1199,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
     <vscale x 8 x half>* %0,
@@ -1221,7 +1221,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1243,7 +1243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
     <vscale x 16 x half>* %0,
@@ -1265,7 +1265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1287,7 +1287,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
     <vscale x 32 x half>* %0,
@@ -1309,7 +1309,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1331,7 +1331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -1353,7 +1353,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1375,7 +1375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -1397,7 +1397,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1419,7 +1419,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -1441,7 +1441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1463,7 +1463,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -1485,7 +1485,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -1529,7 +1529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1551,7 +1551,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -1573,7 +1573,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1595,7 +1595,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -1617,7 +1617,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
     <vscale x 64 x i8> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
index 180b352..af04c96 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
   <vscale x 1 x i64>*,
   i64,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
     <vscale x 1 x double>* %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
     <vscale x 2 x double>* %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
     <vscale x 4 x double>* %0,
@@ -297,7 +297,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -319,7 +319,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
     <vscale x 8 x double>* %0,
@@ -341,7 +341,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -363,7 +363,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -385,7 +385,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -407,7 +407,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -429,7 +429,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -451,7 +451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -473,7 +473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -517,7 +517,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -539,7 +539,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -561,7 +561,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
     <vscale x 1 x float>* %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -627,7 +627,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
     <vscale x 2 x float>* %0,
@@ -649,7 +649,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -671,7 +671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
     <vscale x 4 x float>* %0,
@@ -693,7 +693,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -715,7 +715,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
     <vscale x 8 x float>* %0,
@@ -737,7 +737,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -759,7 +759,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
     <vscale x 16 x float>* %0,
@@ -781,7 +781,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,tu,mu
 ; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -803,7 +803,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -825,7 +825,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -847,7 +847,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -869,7 +869,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -891,7 +891,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -913,7 +913,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -935,7 +935,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -957,7 +957,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1023,7 +1023,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -1045,7 +1045,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1067,7 +1067,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
     <vscale x 1 x half>* %0,
@@ -1089,7 +1089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1111,7 +1111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
     <vscale x 2 x half>* %0,
@@ -1133,7 +1133,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1155,7 +1155,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
     <vscale x 4 x half>* %0,
@@ -1177,7 +1177,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1199,7 +1199,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
     <vscale x 8 x half>* %0,
@@ -1221,7 +1221,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1243,7 +1243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
     <vscale x 16 x half>* %0,
@@ -1265,7 +1265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1287,7 +1287,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
     <vscale x 32 x half>* %0,
@@ -1309,7 +1309,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,tu,mu
 ; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1331,7 +1331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -1353,7 +1353,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1375,7 +1375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -1397,7 +1397,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1419,7 +1419,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -1441,7 +1441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1463,7 +1463,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -1485,7 +1485,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -1529,7 +1529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1551,7 +1551,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -1573,7 +1573,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1595,7 +1595,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,ta,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -1617,7 +1617,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,tu,mu
 ; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
     <vscale x 64 x i8> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll
index efb3648..ad54666 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>*,
   <vscale x 1 x i64>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8>* %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8>* %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8>* %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8>* %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16>* %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16>* %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16>* %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16>* %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32>* %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32>* %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32>* %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32>* %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -551,7 +551,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -573,7 +573,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -595,7 +595,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -617,7 +617,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -639,7 +639,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -661,7 +661,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -683,7 +683,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -705,7 +705,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -728,7 +728,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
     <vscale x 1 x half>* %0,
@@ -750,7 +750,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -773,7 +773,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
     <vscale x 2 x half>* %0,
@@ -795,7 +795,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -818,7 +818,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
     <vscale x 4 x half>* %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
     <vscale x 8 x half>* %0,
@@ -885,7 +885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -908,7 +908,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
     <vscale x 1 x float>* %0,
@@ -930,7 +930,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -953,7 +953,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
     <vscale x 2 x float>* %0,
@@ -975,7 +975,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -998,7 +998,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
     <vscale x 4 x float>* %0,
@@ -1020,7 +1020,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
     <vscale x 8 x float>* %0,
@@ -1065,7 +1065,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1087,7 +1087,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
     <vscale x 1 x double>* %0,
@@ -1109,7 +1109,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1131,7 +1131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
     <vscale x 2 x double>* %0,
@@ -1153,7 +1153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1175,7 +1175,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
     <vscale x 4 x double>* %0,
@@ -1197,7 +1197,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1219,7 +1219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
     <vscale x 8 x double>* %0,
@@ -1241,7 +1241,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1264,7 +1264,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8>* %0,
@@ -1286,7 +1286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8>* %0,
@@ -1331,7 +1331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1354,7 +1354,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8>* %0,
@@ -1376,7 +1376,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1399,7 +1399,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8>* %0,
@@ -1421,7 +1421,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1444,7 +1444,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8>* %0,
@@ -1466,7 +1466,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16>* %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1534,7 +1534,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16>* %0,
@@ -1556,7 +1556,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1579,7 +1579,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16>* %0,
@@ -1601,7 +1601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1624,7 +1624,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16>* %0,
@@ -1646,7 +1646,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16>* %0,
@@ -1691,7 +1691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1713,7 +1713,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -1735,7 +1735,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1757,7 +1757,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -1779,7 +1779,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1801,7 +1801,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -1823,7 +1823,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1845,7 +1845,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -1867,7 +1867,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1889,7 +1889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -1911,7 +1911,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1934,7 +1934,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64>* %0,
@@ -1956,7 +1956,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -1979,7 +1979,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64>* %0,
@@ -2001,7 +2001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2024,7 +2024,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64>* %0,
@@ -2046,7 +2046,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2069,7 +2069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64>* %0,
@@ -2091,7 +2091,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2114,7 +2114,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
     <vscale x 1 x half>* %0,
@@ -2136,7 +2136,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2159,7 +2159,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
     <vscale x 2 x half>* %0,
@@ -2181,7 +2181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2204,7 +2204,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
     <vscale x 4 x half>* %0,
@@ -2226,7 +2226,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
     <vscale x 8 x half>* %0,
@@ -2271,7 +2271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2294,7 +2294,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
     <vscale x 16 x half>* %0,
@@ -2316,7 +2316,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2338,7 +2338,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
     <vscale x 1 x float>* %0,
@@ -2360,7 +2360,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2382,7 +2382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
     <vscale x 2 x float>* %0,
@@ -2404,7 +2404,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
     <vscale x 4 x float>* %0,
@@ -2448,7 +2448,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2470,7 +2470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
     <vscale x 8 x float>* %0,
@@ -2492,7 +2492,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2514,7 +2514,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
     <vscale x 16 x float>* %0,
@@ -2536,7 +2536,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2559,7 +2559,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
     <vscale x 1 x double>* %0,
@@ -2581,7 +2581,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2604,7 +2604,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
     <vscale x 2 x double>* %0,
@@ -2626,7 +2626,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2649,7 +2649,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
     <vscale x 4 x double>* %0,
@@ -2671,7 +2671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2694,7 +2694,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
     <vscale x 8 x double>* %0,
@@ -2716,7 +2716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2739,7 +2739,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8>* %0,
@@ -2761,7 +2761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2784,7 +2784,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8>* %0,
@@ -2806,7 +2806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2829,7 +2829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8>* %0,
@@ -2851,7 +2851,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2874,7 +2874,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8>* %0,
@@ -2896,7 +2896,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2919,7 +2919,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8>* %0,
@@ -2941,7 +2941,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -2964,7 +2964,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8>* %0,
@@ -2986,7 +2986,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3008,7 +3008,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -3030,7 +3030,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3052,7 +3052,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -3074,7 +3074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3096,7 +3096,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -3118,7 +3118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3140,7 +3140,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -3162,7 +3162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3184,7 +3184,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -3206,7 +3206,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3228,7 +3228,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -3250,7 +3250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3273,7 +3273,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32>* %0,
@@ -3295,7 +3295,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3318,7 +3318,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32>* %0,
@@ -3340,7 +3340,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3363,7 +3363,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32>* %0,
@@ -3385,7 +3385,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3408,7 +3408,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32>* %0,
@@ -3430,7 +3430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3453,7 +3453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32>* %0,
@@ -3475,7 +3475,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3498,7 +3498,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64>* %0,
@@ -3520,7 +3520,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3543,7 +3543,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64>* %0,
@@ -3565,7 +3565,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3588,7 +3588,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64>* %0,
@@ -3610,7 +3610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3633,7 +3633,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64>* %0,
@@ -3655,7 +3655,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3677,7 +3677,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
     <vscale x 1 x half>* %0,
@@ -3699,7 +3699,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3721,7 +3721,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
     <vscale x 2 x half>* %0,
@@ -3743,7 +3743,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3765,7 +3765,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
     <vscale x 4 x half>* %0,
@@ -3787,7 +3787,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3809,7 +3809,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
     <vscale x 8 x half>* %0,
@@ -3831,7 +3831,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3853,7 +3853,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
     <vscale x 16 x half>* %0,
@@ -3875,7 +3875,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3897,7 +3897,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
     <vscale x 32 x half>* %0,
@@ -3919,7 +3919,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -3942,7 +3942,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
     <vscale x 1 x float>* %0,
@@ -3964,7 +3964,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -3987,7 +3987,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
     <vscale x 2 x float>* %0,
@@ -4009,7 +4009,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4032,7 +4032,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
     <vscale x 4 x float>* %0,
@@ -4054,7 +4054,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4077,7 +4077,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
     <vscale x 8 x float>* %0,
@@ -4099,7 +4099,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4122,7 +4122,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
     <vscale x 16 x float>* %0,
@@ -4144,7 +4144,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4167,7 +4167,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
     <vscale x 1 x double>* %0,
@@ -4189,7 +4189,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4212,7 +4212,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
     <vscale x 2 x double>* %0,
@@ -4234,7 +4234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4257,7 +4257,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
     <vscale x 4 x double>* %0,
@@ -4279,7 +4279,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4302,7 +4302,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
     <vscale x 8 x double>* %0,
@@ -4324,7 +4324,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4346,7 +4346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -4368,7 +4368,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4390,7 +4390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -4412,7 +4412,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4434,7 +4434,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -4456,7 +4456,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4478,7 +4478,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -4500,7 +4500,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4522,7 +4522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -4544,7 +4544,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4566,7 +4566,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -4588,7 +4588,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4610,7 +4610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -4632,7 +4632,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4655,7 +4655,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16>* %0,
@@ -4677,7 +4677,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4700,7 +4700,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16>* %0,
@@ -4722,7 +4722,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4745,7 +4745,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16>* %0,
@@ -4767,7 +4767,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4790,7 +4790,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16>* %0,
@@ -4812,7 +4812,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4835,7 +4835,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16>* %0,
@@ -4857,7 +4857,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -4880,7 +4880,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16>* %0,
@@ -4902,7 +4902,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -4925,7 +4925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32>* %0,
@@ -4947,7 +4947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -4970,7 +4970,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32>* %0,
@@ -4992,7 +4992,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5015,7 +5015,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32>* %0,
@@ -5037,7 +5037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5060,7 +5060,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32>* %0,
@@ -5082,7 +5082,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5105,7 +5105,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32>* %0,
@@ -5127,7 +5127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5150,7 +5150,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64>* %0,
@@ -5172,7 +5172,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5195,7 +5195,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64>* %0,
@@ -5217,7 +5217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5240,7 +5240,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64>* %0,
@@ -5262,7 +5262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5285,7 +5285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64>* %0,
@@ -5307,7 +5307,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5330,7 +5330,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
     <vscale x 1 x half>* %0,
@@ -5352,7 +5352,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5375,7 +5375,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
     <vscale x 2 x half>* %0,
@@ -5397,7 +5397,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5420,7 +5420,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
     <vscale x 4 x half>* %0,
@@ -5442,7 +5442,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5465,7 +5465,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
     <vscale x 8 x half>* %0,
@@ -5487,7 +5487,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5510,7 +5510,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
     <vscale x 16 x half>* %0,
@@ -5532,7 +5532,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5555,7 +5555,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
     <vscale x 32 x half>* %0,
@@ -5577,7 +5577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5600,7 +5600,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
     <vscale x 1 x float>* %0,
@@ -5622,7 +5622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5645,7 +5645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
     <vscale x 2 x float>* %0,
@@ -5667,7 +5667,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5690,7 +5690,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
     <vscale x 4 x float>* %0,
@@ -5712,7 +5712,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5735,7 +5735,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
     <vscale x 8 x float>* %0,
@@ -5757,7 +5757,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5780,7 +5780,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
     <vscale x 16 x float>* %0,
@@ -5802,7 +5802,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5825,7 +5825,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
     <vscale x 1 x double>* %0,
@@ -5847,7 +5847,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -5870,7 +5870,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
     <vscale x 2 x double>* %0,
@@ -5892,7 +5892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -5915,7 +5915,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
     <vscale x 4 x double>* %0,
@@ -5937,7 +5937,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -5960,7 +5960,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
     <vscale x 8 x double>* %0,
@@ -5982,7 +5982,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
index 77136f2..af97244 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>*,
   <vscale x 1 x i64>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8>* %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8>* %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8>* %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8>* %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16>* %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16>* %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16>* %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16>* %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32>* %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32>* %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32>* %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32>* %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -551,7 +551,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64>* %0,
@@ -573,7 +573,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -595,7 +595,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64>* %0,
@@ -617,7 +617,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -639,7 +639,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64>* %0,
@@ -661,7 +661,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -683,7 +683,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64>* %0,
@@ -705,7 +705,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -728,7 +728,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
     <vscale x 1 x half>* %0,
@@ -750,7 +750,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -773,7 +773,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
     <vscale x 2 x half>* %0,
@@ -795,7 +795,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -818,7 +818,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
     <vscale x 4 x half>* %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
     <vscale x 8 x half>* %0,
@@ -885,7 +885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -908,7 +908,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
     <vscale x 1 x float>* %0,
@@ -930,7 +930,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -953,7 +953,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
     <vscale x 2 x float>* %0,
@@ -975,7 +975,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -998,7 +998,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
     <vscale x 4 x float>* %0,
@@ -1020,7 +1020,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
     <vscale x 8 x float>* %0,
@@ -1065,7 +1065,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1087,7 +1087,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
     <vscale x 1 x double>* %0,
@@ -1109,7 +1109,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1131,7 +1131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
     <vscale x 2 x double>* %0,
@@ -1153,7 +1153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1175,7 +1175,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
     <vscale x 4 x double>* %0,
@@ -1197,7 +1197,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1219,7 +1219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
     <vscale x 8 x double>* %0,
@@ -1241,7 +1241,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1264,7 +1264,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8>* %0,
@@ -1286,7 +1286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1309,7 +1309,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8>* %0,
@@ -1331,7 +1331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1354,7 +1354,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8>* %0,
@@ -1376,7 +1376,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1399,7 +1399,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8>* %0,
@@ -1421,7 +1421,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1444,7 +1444,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8>* %0,
@@ -1466,7 +1466,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16>* %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1534,7 +1534,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16>* %0,
@@ -1556,7 +1556,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1579,7 +1579,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16>* %0,
@@ -1601,7 +1601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1624,7 +1624,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16>* %0,
@@ -1646,7 +1646,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1669,7 +1669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16>* %0,
@@ -1691,7 +1691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1713,7 +1713,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32>* %0,
@@ -1735,7 +1735,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1757,7 +1757,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32>* %0,
@@ -1779,7 +1779,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1801,7 +1801,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32>* %0,
@@ -1823,7 +1823,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1845,7 +1845,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32>* %0,
@@ -1867,7 +1867,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1889,7 +1889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32>* %0,
@@ -1911,7 +1911,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1934,7 +1934,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64>* %0,
@@ -1956,7 +1956,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -1979,7 +1979,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64>* %0,
@@ -2001,7 +2001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2024,7 +2024,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64>* %0,
@@ -2046,7 +2046,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2069,7 +2069,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64>* %0,
@@ -2091,7 +2091,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2114,7 +2114,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
     <vscale x 1 x half>* %0,
@@ -2136,7 +2136,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2159,7 +2159,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
     <vscale x 2 x half>* %0,
@@ -2181,7 +2181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2204,7 +2204,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
     <vscale x 4 x half>* %0,
@@ -2226,7 +2226,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
     <vscale x 8 x half>* %0,
@@ -2271,7 +2271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2294,7 +2294,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
     <vscale x 16 x half>* %0,
@@ -2316,7 +2316,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2338,7 +2338,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
     <vscale x 1 x float>* %0,
@@ -2360,7 +2360,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2382,7 +2382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
     <vscale x 2 x float>* %0,
@@ -2404,7 +2404,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
     <vscale x 4 x float>* %0,
@@ -2448,7 +2448,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2470,7 +2470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
     <vscale x 8 x float>* %0,
@@ -2492,7 +2492,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2514,7 +2514,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
     <vscale x 16 x float>* %0,
@@ -2536,7 +2536,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2559,7 +2559,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei32.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
     <vscale x 1 x double>* %0,
@@ -2581,7 +2581,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2604,7 +2604,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei32.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
     <vscale x 2 x double>* %0,
@@ -2626,7 +2626,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2649,7 +2649,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei32.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
     <vscale x 4 x double>* %0,
@@ -2671,7 +2671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2694,7 +2694,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei32.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
     <vscale x 8 x double>* %0,
@@ -2716,7 +2716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2739,7 +2739,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8>* %0,
@@ -2761,7 +2761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2784,7 +2784,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8>* %0,
@@ -2806,7 +2806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2829,7 +2829,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8>* %0,
@@ -2851,7 +2851,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2874,7 +2874,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8>* %0,
@@ -2896,7 +2896,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2919,7 +2919,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8>* %0,
@@ -2941,7 +2941,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -2964,7 +2964,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8>* %0,
@@ -2986,7 +2986,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3008,7 +3008,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16>* %0,
@@ -3030,7 +3030,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3052,7 +3052,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16>* %0,
@@ -3074,7 +3074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3096,7 +3096,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16>* %0,
@@ -3118,7 +3118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3140,7 +3140,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16>* %0,
@@ -3162,7 +3162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3184,7 +3184,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16>* %0,
@@ -3206,7 +3206,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3228,7 +3228,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16>* %0,
@@ -3250,7 +3250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3273,7 +3273,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32>* %0,
@@ -3295,7 +3295,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3318,7 +3318,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32>* %0,
@@ -3340,7 +3340,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3363,7 +3363,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32>* %0,
@@ -3385,7 +3385,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3408,7 +3408,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32>* %0,
@@ -3430,7 +3430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3453,7 +3453,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32>* %0,
@@ -3475,7 +3475,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3498,7 +3498,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64>* %0,
@@ -3520,7 +3520,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3543,7 +3543,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64>* %0,
@@ -3565,7 +3565,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3588,7 +3588,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64>* %0,
@@ -3610,7 +3610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3633,7 +3633,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64>* %0,
@@ -3655,7 +3655,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3677,7 +3677,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
     <vscale x 1 x half>* %0,
@@ -3699,7 +3699,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3721,7 +3721,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
     <vscale x 2 x half>* %0,
@@ -3743,7 +3743,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3765,7 +3765,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
     <vscale x 4 x half>* %0,
@@ -3787,7 +3787,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3809,7 +3809,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
     <vscale x 8 x half>* %0,
@@ -3831,7 +3831,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3853,7 +3853,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
     <vscale x 16 x half>* %0,
@@ -3875,7 +3875,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3897,7 +3897,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
     <vscale x 32 x half>* %0,
@@ -3919,7 +3919,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -3942,7 +3942,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
     <vscale x 1 x float>* %0,
@@ -3964,7 +3964,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -3987,7 +3987,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
     <vscale x 2 x float>* %0,
@@ -4009,7 +4009,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4032,7 +4032,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
     <vscale x 4 x float>* %0,
@@ -4054,7 +4054,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4077,7 +4077,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
     <vscale x 8 x float>* %0,
@@ -4099,7 +4099,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4122,7 +4122,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
     <vscale x 16 x float>* %0,
@@ -4144,7 +4144,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4167,7 +4167,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei16.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
     <vscale x 1 x double>* %0,
@@ -4189,7 +4189,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4212,7 +4212,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei16.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
     <vscale x 2 x double>* %0,
@@ -4234,7 +4234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4257,7 +4257,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei16.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
     <vscale x 4 x double>* %0,
@@ -4279,7 +4279,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4302,7 +4302,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei16.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
     <vscale x 8 x double>* %0,
@@ -4324,7 +4324,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4346,7 +4346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8>* %0,
@@ -4368,7 +4368,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4390,7 +4390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8>* %0,
@@ -4412,7 +4412,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4434,7 +4434,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8>* %0,
@@ -4456,7 +4456,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4478,7 +4478,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8>* %0,
@@ -4500,7 +4500,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4522,7 +4522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8>* %0,
@@ -4544,7 +4544,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4566,7 +4566,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8>* %0,
@@ -4588,7 +4588,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4610,7 +4610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8>* %0,
@@ -4632,7 +4632,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4655,7 +4655,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16>* %0,
@@ -4677,7 +4677,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4700,7 +4700,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16>* %0,
@@ -4722,7 +4722,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4745,7 +4745,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16>* %0,
@@ -4767,7 +4767,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4790,7 +4790,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16>* %0,
@@ -4812,7 +4812,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4835,7 +4835,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16>* %0,
@@ -4857,7 +4857,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -4880,7 +4880,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16>* %0,
@@ -4902,7 +4902,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -4925,7 +4925,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32>* %0,
@@ -4947,7 +4947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -4970,7 +4970,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32>* %0,
@@ -4992,7 +4992,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5015,7 +5015,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32>* %0,
@@ -5037,7 +5037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5060,7 +5060,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32>* %0,
@@ -5082,7 +5082,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5105,7 +5105,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32>* %0,
@@ -5127,7 +5127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5150,7 +5150,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64>* %0,
@@ -5172,7 +5172,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5195,7 +5195,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64>* %0,
@@ -5217,7 +5217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5240,7 +5240,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64>* %0,
@@ -5262,7 +5262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5285,7 +5285,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64>* %0,
@@ -5307,7 +5307,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5330,7 +5330,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
     <vscale x 1 x half>* %0,
@@ -5352,7 +5352,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5375,7 +5375,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
     <vscale x 2 x half>* %0,
@@ -5397,7 +5397,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5420,7 +5420,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
     <vscale x 4 x half>* %0,
@@ -5442,7 +5442,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5465,7 +5465,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
     <vscale x 8 x half>* %0,
@@ -5487,7 +5487,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5510,7 +5510,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
     <vscale x 16 x half>* %0,
@@ -5532,7 +5532,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5555,7 +5555,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
     <vscale x 32 x half>* %0,
@@ -5577,7 +5577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5600,7 +5600,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
     <vscale x 1 x float>* %0,
@@ -5622,7 +5622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5645,7 +5645,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
     <vscale x 2 x float>* %0,
@@ -5667,7 +5667,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5690,7 +5690,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
     <vscale x 4 x float>* %0,
@@ -5712,7 +5712,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5735,7 +5735,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
     <vscale x 8 x float>* %0,
@@ -5757,7 +5757,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5780,7 +5780,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
     <vscale x 16 x float>* %0,
@@ -5802,7 +5802,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5825,7 +5825,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vluxei8.v v25, (a0), v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
     <vscale x 1 x double>* %0,
@@ -5847,7 +5847,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -5870,7 +5870,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vluxei8.v v26, (a0), v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
     <vscale x 2 x double>* %0,
@@ -5892,7 +5892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -5915,7 +5915,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vluxei8.v v28, (a0), v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
     <vscale x 4 x double>* %0,
@@ -5937,7 +5937,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -5960,7 +5960,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vluxei8.v v16, (a0), v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
     <vscale x 8 x double>* %0,
@@ -5982,7 +5982,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
index 490f744..7e97778 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1537,7 +1537,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v25, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1567,7 +1567,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v25, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v26, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1627,7 +1627,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v26, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1657,7 +1657,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v28, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v28, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
index 46839d3..b82c667 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1530,7 +1530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1553,7 +1553,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1576,7 +1576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1622,7 +1622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1645,7 +1645,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
index 86afd6e..be4ba09 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -151,7 +151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -171,7 +171,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -191,7 +191,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -211,7 +211,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -251,7 +251,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -291,7 +291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -331,7 +331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -351,7 +351,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -371,7 +371,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -391,7 +391,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -411,7 +411,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -431,7 +431,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -451,7 +451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -471,7 +471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -491,7 +491,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -511,7 +511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -551,7 +551,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -571,7 +571,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -591,7 +591,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -611,7 +611,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -631,7 +631,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -671,7 +671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -691,7 +691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -731,7 +731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -751,7 +751,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmadc.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -843,7 +843,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmadc.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmadc.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -895,7 +895,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -910,7 +910,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -925,7 +925,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -940,7 +940,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -970,7 +970,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -985,7 +985,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1000,7 +1000,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1015,7 +1015,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1030,7 +1030,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1045,7 +1045,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1060,7 +1060,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1075,7 +1075,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1090,7 +1090,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1105,7 +1105,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1120,7 +1120,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1150,7 +1150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1165,7 +1165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1180,7 +1180,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1210,7 +1210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
index 2a4d8a8..a9fd51b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -151,7 +151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -171,7 +171,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -191,7 +191,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -211,7 +211,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -251,7 +251,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -291,7 +291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -331,7 +331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -351,7 +351,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -371,7 +371,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -391,7 +391,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -411,7 +411,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -431,7 +431,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -451,7 +451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -471,7 +471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -491,7 +491,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -511,7 +511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -551,7 +551,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -571,7 +571,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -591,7 +591,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -611,7 +611,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -631,7 +631,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -671,7 +671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -691,7 +691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -731,7 +731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -751,7 +751,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -811,7 +811,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -831,7 +831,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -851,7 +851,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -871,7 +871,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -901,7 +901,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -931,7 +931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -946,7 +946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -961,7 +961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -976,7 +976,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -991,7 +991,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1006,7 +1006,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1021,7 +1021,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1036,7 +1036,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1051,7 +1051,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1081,7 +1081,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1096,7 +1096,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1111,7 +1111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1126,7 +1126,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1156,7 +1156,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1171,7 +1171,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1186,7 +1186,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1201,7 +1201,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vi v0, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
index 0186e0c..cbd6e60 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -36,7 +36,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -59,7 +59,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -82,7 +82,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -105,7 +105,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -128,7 +128,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -151,7 +151,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -174,7 +174,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -197,7 +197,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -220,7 +220,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -243,7 +243,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -266,7 +266,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -289,7 +289,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -312,7 +312,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -335,7 +335,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -358,7 +358,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -381,7 +381,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -404,7 +404,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -427,7 +427,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -450,7 +450,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -473,7 +473,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -496,7 +496,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -519,7 +519,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -542,7 +542,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -588,7 +588,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -611,7 +611,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -634,7 +634,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -657,7 +657,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -680,7 +680,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -703,7 +703,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -726,7 +726,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -749,7 +749,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -772,7 +772,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -795,7 +795,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -818,7 +818,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -841,7 +841,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -864,7 +864,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -887,7 +887,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -910,7 +910,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -939,7 +939,7 @@
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -968,7 +968,7 @@
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v28, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1026,7 +1026,7 @@
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1043,7 +1043,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1060,7 +1060,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1077,7 +1077,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1094,7 +1094,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1111,7 +1111,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1128,7 +1128,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1145,7 +1145,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1179,7 +1179,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1196,7 +1196,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1213,7 +1213,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1230,7 +1230,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1264,7 +1264,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1281,7 +1281,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1298,7 +1298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1315,7 +1315,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1332,7 +1332,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1349,7 +1349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1366,7 +1366,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1383,7 +1383,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1400,7 +1400,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
index 349efa4..a359474 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -36,7 +36,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -59,7 +59,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -82,7 +82,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -105,7 +105,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -128,7 +128,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -151,7 +151,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -174,7 +174,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -197,7 +197,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -220,7 +220,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -243,7 +243,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -266,7 +266,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -289,7 +289,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -312,7 +312,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -335,7 +335,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -358,7 +358,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -381,7 +381,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -404,7 +404,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -427,7 +427,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -450,7 +450,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -473,7 +473,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -496,7 +496,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -519,7 +519,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -542,7 +542,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -588,7 +588,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -611,7 +611,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -634,7 +634,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -657,7 +657,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -680,7 +680,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -703,7 +703,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -726,7 +726,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -749,7 +749,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -772,7 +772,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -795,7 +795,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -818,7 +818,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -841,7 +841,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -864,7 +864,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -887,7 +887,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -910,7 +910,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -933,7 +933,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -979,7 +979,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1036,7 +1036,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1053,7 +1053,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1087,7 +1087,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1104,7 +1104,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1121,7 +1121,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1138,7 +1138,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1155,7 +1155,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1172,7 +1172,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1189,7 +1189,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1206,7 +1206,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1223,7 +1223,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1240,7 +1240,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1257,7 +1257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1274,7 +1274,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1308,7 +1308,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1325,7 +1325,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1342,7 +1342,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1359,7 +1359,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1376,7 +1376,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmadc.vim v25, v8, 9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
index 8105f15..d01fb2e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1537,7 +1537,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v25, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1567,7 +1567,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v25, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v26, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1627,7 +1627,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v26, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1657,7 +1657,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v28, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v28, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
index f452904..0f0c2417 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1530,7 +1530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1553,7 +1553,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1576,7 +1576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1622,7 +1622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1645,7 +1645,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
index 4f93f7c..c8894f3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll
index 304606c..0ab530c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll
index 2d5718c..9de2d8d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll
index e2b9ec1..4f000bc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
index bcba9a9..b920ecf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmax.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmax.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmax.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
index 792adc8..2003122 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmax.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmax.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
index 9a9ca53..60e3f70 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
index 77a8506..5c6a106 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmaxu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll
index 451dc77..a58733d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -562,7 +562,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -584,7 +584,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -606,7 +606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -628,7 +628,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -650,7 +650,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -672,7 +672,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -694,7 +694,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -716,7 +716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -738,7 +738,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -760,7 +760,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -782,7 +782,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -804,7 +804,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -826,7 +826,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -848,7 +848,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -870,7 +870,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -898,7 +898,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v25, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -926,7 +926,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v26, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -954,7 +954,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v28, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -982,7 +982,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -998,7 +998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1014,7 +1014,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1030,7 +1030,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1046,7 +1046,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1062,7 +1062,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1078,7 +1078,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1094,7 +1094,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1110,7 +1110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1126,7 +1126,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1142,7 +1142,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1174,7 +1174,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1190,7 +1190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1206,7 +1206,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1222,7 +1222,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1238,7 +1238,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1270,7 +1270,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1286,7 +1286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1302,7 +1302,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1318,7 +1318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1334,7 +1334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll
index 172330b..d8c91a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmerge-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -562,7 +562,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -584,7 +584,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -606,7 +606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -628,7 +628,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -650,7 +650,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -672,7 +672,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -694,7 +694,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -716,7 +716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -738,7 +738,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -760,7 +760,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -782,7 +782,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -804,7 +804,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -826,7 +826,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -848,7 +848,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -870,7 +870,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -892,7 +892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -914,7 +914,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -936,7 +936,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -958,7 +958,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -974,7 +974,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -990,7 +990,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1006,7 +1006,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1022,7 +1022,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1038,7 +1038,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1054,7 +1054,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1086,7 +1086,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1102,7 +1102,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1118,7 +1118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1134,7 +1134,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1150,7 +1150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1166,7 +1166,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1198,7 +1198,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1214,7 +1214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1230,7 +1230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1246,7 +1246,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1262,7 +1262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1278,7 +1278,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1310,7 +1310,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
index f588b57..a9549a4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1081,7 +1081,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1111,7 +1111,7 @@
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1138,7 +1138,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1168,7 +1168,7 @@
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1195,7 +1195,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll
index 62b625f5..16c169e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfeq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1077,7 +1077,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1103,7 +1103,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1126,7 +1126,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1152,7 +1152,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1175,7 +1175,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfeq.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1201,7 +1201,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfeq.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
index 2415e92..4a46b5e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1081,7 +1081,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1111,7 +1111,7 @@
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1138,7 +1138,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1168,7 +1168,7 @@
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1195,7 +1195,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll
index da01f12..efdb011 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1077,7 +1077,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1103,7 +1103,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1126,7 +1126,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1152,7 +1152,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1175,7 +1175,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfge.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1201,7 +1201,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfge.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
index 5d22524..e4d2a71 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1081,7 +1081,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1111,7 +1111,7 @@
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1138,7 +1138,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1168,7 +1168,7 @@
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1195,7 +1195,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll
index 780640e..9f53c61 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1077,7 +1077,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1103,7 +1103,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1126,7 +1126,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1152,7 +1152,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1175,7 +1175,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfgt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1201,7 +1201,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfgt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
index 23f1ff6..5c84a6b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1081,7 +1081,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1111,7 +1111,7 @@
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1138,7 +1138,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1168,7 +1168,7 @@
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1195,7 +1195,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll
index f956804..1f1adb9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1077,7 +1077,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1103,7 +1103,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1126,7 +1126,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1152,7 +1152,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1175,7 +1175,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfle.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1201,7 +1201,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfle.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
index 4bed8e4..36297ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1081,7 +1081,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1111,7 +1111,7 @@
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1138,7 +1138,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1168,7 +1168,7 @@
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1195,7 +1195,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll
index 4450de6..1af3275 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmflt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1077,7 +1077,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1103,7 +1103,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1126,7 +1126,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1152,7 +1152,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1175,7 +1175,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmflt.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1201,7 +1201,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmflt.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
index a849c01..fca61cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1081,7 +1081,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1111,7 +1111,7 @@
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1138,7 +1138,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1168,7 +1168,7 @@
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1195,7 +1195,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll
index ce840a6..d5eb280 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
     <vscale x 1 x half> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
     <vscale x 1 x half> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
     <vscale x 2 x half> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
     <vscale x 2 x half> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
     <vscale x 4 x half> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
     <vscale x 4 x half> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
     <vscale x 8 x half> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
     <vscale x 8 x half> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
     <vscale x 16 x half> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
     <vscale x 16 x half> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
     <vscale x 1 x float> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
     <vscale x 1 x float> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
     <vscale x 2 x float> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
     <vscale x 2 x float> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
     <vscale x 4 x float> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
     <vscale x 4 x float> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
     <vscale x 8 x float> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
     <vscale x 8 x float> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
     <vscale x 1 x double> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
     <vscale x 1 x double> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
     <vscale x 2 x double> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
     <vscale x 2 x double> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
     <vscale x 4 x double> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmfne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
     <vscale x 4 x double> %1,
@@ -636,7 +636,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.f16(
     <vscale x 1 x half> %0,
@@ -662,7 +662,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
     <vscale x 1 x i1> %0,
@@ -685,7 +685,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16.f16(
     <vscale x 2 x half> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
     <vscale x 2 x i1> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16.f16(
     <vscale x 4 x half> %0,
@@ -760,7 +760,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
     <vscale x 4 x i1> %0,
@@ -783,7 +783,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16.f16(
     <vscale x 8 x half> %0,
@@ -809,7 +809,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
     <vscale x 8 x i1> %0,
@@ -832,7 +832,7 @@
 ; CHECK-NEXT:    fmv.h.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16.f16(
     <vscale x 16 x half> %0,
@@ -858,7 +858,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
     <vscale x 16 x i1> %0,
@@ -881,7 +881,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32(
     <vscale x 1 x float> %0,
@@ -907,7 +907,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
     <vscale x 1 x i1> %0,
@@ -930,7 +930,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32(
     <vscale x 2 x float> %0,
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
     <vscale x 2 x i1> %0,
@@ -979,7 +979,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32(
     <vscale x 4 x float> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
     <vscale x 4 x i1> %0,
@@ -1028,7 +1028,7 @@
 ; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32(
     <vscale x 8 x float> %0,
@@ -1054,7 +1054,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
     <vscale x 8 x i1> %0,
@@ -1077,7 +1077,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64(
     <vscale x 1 x double> %0,
@@ -1103,7 +1103,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
     <vscale x 1 x i1> %0,
@@ -1126,7 +1126,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64(
     <vscale x 2 x double> %0,
@@ -1152,7 +1152,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
     <vscale x 2 x i1> %0,
@@ -1175,7 +1175,7 @@
 ; CHECK-NEXT:    fmv.d.x ft0, a0
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmfne.vf v0, v8, ft0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64(
     <vscale x 4 x double> %0,
@@ -1201,7 +1201,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmfne.vf v25, v8, ft0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
index 3124b30..580e236 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmin.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmin.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmin.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
index 634b268..065345d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmin.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmin.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
index 497a4c7..3cca47a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vminu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vminu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vminu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
index d068f9a..999c7465 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vminu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vminu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vminu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll
index 69ca931..f66b998 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll
index efdd113..1da5906 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmnand.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll
index 8fcbcc1..0264359 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll
index 5859986..4187f97 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll
index 5e1181b..395f99e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll
index d740494..b7ac478 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll
index 3f4f36f..0731228 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll
index 338862b..28337aa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmornot.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
index 2f82f93..344bcfe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -151,7 +151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -171,7 +171,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -191,7 +191,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -211,7 +211,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -251,7 +251,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -291,7 +291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -331,7 +331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -351,7 +351,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -371,7 +371,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -391,7 +391,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -411,7 +411,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -431,7 +431,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -451,7 +451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -471,7 +471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -491,7 +491,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -511,7 +511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -551,7 +551,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -571,7 +571,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -591,7 +591,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -611,7 +611,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -631,7 +631,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -671,7 +671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -691,7 +691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -731,7 +731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -751,7 +751,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -843,7 +843,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -895,7 +895,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
index a1df2b2..a2d3fe1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -151,7 +151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -171,7 +171,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -191,7 +191,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -211,7 +211,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -251,7 +251,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -291,7 +291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -331,7 +331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -351,7 +351,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -371,7 +371,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -391,7 +391,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -411,7 +411,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -431,7 +431,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vv v0, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -451,7 +451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -471,7 +471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -491,7 +491,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -511,7 +511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -551,7 +551,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -571,7 +571,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -591,7 +591,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -611,7 +611,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -631,7 +631,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -671,7 +671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -691,7 +691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -731,7 +731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -751,7 +751,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -811,7 +811,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -831,7 +831,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -851,7 +851,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -871,7 +871,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
index 98999ea..bcdc990 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -36,7 +36,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -59,7 +59,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -82,7 +82,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -105,7 +105,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -128,7 +128,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -151,7 +151,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -174,7 +174,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -197,7 +197,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -220,7 +220,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -243,7 +243,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -266,7 +266,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -289,7 +289,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -312,7 +312,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -335,7 +335,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -358,7 +358,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -381,7 +381,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -404,7 +404,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -427,7 +427,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -450,7 +450,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -473,7 +473,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -496,7 +496,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -519,7 +519,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -542,7 +542,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -588,7 +588,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -611,7 +611,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -634,7 +634,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -657,7 +657,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -680,7 +680,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -703,7 +703,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -726,7 +726,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -749,7 +749,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -772,7 +772,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -795,7 +795,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -818,7 +818,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -841,7 +841,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -864,7 +864,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -887,7 +887,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -910,7 +910,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -939,7 +939,7 @@
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -968,7 +968,7 @@
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v26, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v28, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1026,7 +1026,7 @@
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
index 96a98b8..dd8ce74 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -13,7 +13,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -36,7 +36,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -59,7 +59,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -82,7 +82,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -105,7 +105,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -128,7 +128,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -151,7 +151,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -174,7 +174,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -197,7 +197,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -220,7 +220,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -243,7 +243,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -266,7 +266,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -289,7 +289,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -312,7 +312,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -335,7 +335,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -358,7 +358,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -381,7 +381,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -404,7 +404,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -427,7 +427,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -450,7 +450,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v10, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -473,7 +473,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v12, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -496,7 +496,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vvm v25, v8, v16, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -519,7 +519,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -542,7 +542,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -565,7 +565,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -588,7 +588,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -611,7 +611,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -634,7 +634,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -657,7 +657,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -680,7 +680,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -703,7 +703,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -726,7 +726,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -749,7 +749,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -772,7 +772,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -795,7 +795,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -818,7 +818,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -841,7 +841,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -864,7 +864,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -887,7 +887,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -910,7 +910,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -933,7 +933,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -979,7 +979,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmsbc.vxm v25, v8, a0, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
index 5f290d4..1f320ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
   <vscale x 1 x i1>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -33,7 +33,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -53,7 +53,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -75,7 +75,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -95,7 +95,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -117,7 +117,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -137,7 +137,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -159,7 +159,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -179,7 +179,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -201,7 +201,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -221,7 +221,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -243,7 +243,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -263,7 +263,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -285,7 +285,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
index fc3ffa5..a057b90 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
   <vscale x 1 x i1>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -33,7 +33,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -53,7 +53,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -75,7 +75,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -95,7 +95,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -117,7 +117,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -137,7 +137,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -159,7 +159,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -179,7 +179,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -201,7 +201,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -221,7 +221,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -243,7 +243,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -263,7 +263,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsbf.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -285,7 +285,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsbf.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
index 5e1f4d1..7ac570a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmseq.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@
 ; CHECK-NEXT:    vmseq.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmseq.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@
 ; CHECK-NEXT:    vmseq.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmseq.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vmseq.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
index 92bd7ed..8a1b9b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmseq.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmseq.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmseq.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
index be2e83d..560fa18 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -948,7 +948,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1023,7 +1023,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1046,7 +1046,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1072,7 +1072,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1095,7 +1095,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1121,7 +1121,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1144,7 +1144,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1170,7 +1170,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1193,7 +1193,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1219,7 +1219,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1242,7 +1242,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1268,7 +1268,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1340,7 +1340,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1366,7 +1366,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1389,7 +1389,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1487,7 +1487,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1513,7 +1513,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1562,7 +1562,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1585,7 +1585,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1611,7 +1611,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1634,7 +1634,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1660,7 +1660,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1688,7 +1688,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsle.vv v0, v25, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1720,7 +1720,7 @@
 ; CHECK-NEXT:    vmsle.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1748,7 +1748,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsle.vv v0, v26, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1780,7 +1780,7 @@
 ; CHECK-NEXT:    vmsle.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1808,7 +1808,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsle.vv v0, v28, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1840,7 +1840,7 @@
 ; CHECK-NEXT:    vmsle.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1857,7 +1857,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1875,7 +1875,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1892,7 +1892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1910,7 +1910,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1927,7 +1927,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1945,7 +1945,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1962,7 +1962,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1980,7 +1980,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1997,7 +1997,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2015,7 +2015,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2032,7 +2032,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2050,7 +2050,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2067,7 +2067,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2085,7 +2085,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2102,7 +2102,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2120,7 +2120,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2137,7 +2137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2155,7 +2155,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2172,7 +2172,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2190,7 +2190,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2207,7 +2207,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2225,7 +2225,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2242,7 +2242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2260,7 +2260,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2277,7 +2277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2295,7 +2295,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2312,7 +2312,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2347,7 +2347,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2365,7 +2365,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2382,7 +2382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2400,7 +2400,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2417,7 +2417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2435,7 +2435,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2452,7 +2452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2470,7 +2470,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -2489,7 +2489,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -2507,7 +2507,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -2525,7 +2525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -2543,7 +2543,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -2561,7 +2561,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2579,7 +2579,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2597,7 +2597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2615,7 +2615,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2651,7 +2651,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2669,7 +2669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2687,7 +2687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2705,7 +2705,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2723,7 +2723,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2741,7 +2741,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2767,7 +2767,7 @@
 ; CHECK-NEXT:    vmsle.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2793,7 +2793,7 @@
 ; CHECK-NEXT:    vmsle.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2819,7 +2819,7 @@
 ; CHECK-NEXT:    vmsle.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
index a8cf4a0..0b16145 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -948,7 +948,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1023,7 +1023,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1046,7 +1046,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1072,7 +1072,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1095,7 +1095,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1121,7 +1121,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1144,7 +1144,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1170,7 +1170,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1193,7 +1193,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1219,7 +1219,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1242,7 +1242,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1268,7 +1268,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1340,7 +1340,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1366,7 +1366,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1389,7 +1389,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1487,7 +1487,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1513,7 +1513,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1562,7 +1562,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1585,7 +1585,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1611,7 +1611,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1634,7 +1634,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1660,7 +1660,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1683,7 +1683,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1732,7 +1732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1758,7 +1758,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1807,7 +1807,7 @@
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1824,7 +1824,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1842,7 +1842,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1859,7 +1859,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1877,7 +1877,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1894,7 +1894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1912,7 +1912,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1947,7 +1947,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1964,7 +1964,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1982,7 +1982,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1999,7 +1999,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2017,7 +2017,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2034,7 +2034,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2052,7 +2052,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2069,7 +2069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2087,7 +2087,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2104,7 +2104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2122,7 +2122,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2139,7 +2139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2157,7 +2157,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2174,7 +2174,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2192,7 +2192,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2209,7 +2209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2227,7 +2227,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2244,7 +2244,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2262,7 +2262,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2279,7 +2279,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2297,7 +2297,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2314,7 +2314,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2332,7 +2332,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2349,7 +2349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 13
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2367,7 +2367,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2384,7 +2384,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2402,7 +2402,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2419,7 +2419,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, -15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2437,7 +2437,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -2456,7 +2456,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -2474,7 +2474,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -2492,7 +2492,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -2510,7 +2510,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -2528,7 +2528,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2546,7 +2546,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2564,7 +2564,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2582,7 +2582,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2600,7 +2600,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2636,7 +2636,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2654,7 +2654,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2672,7 +2672,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2690,7 +2690,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2708,7 +2708,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2726,7 +2726,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2744,7 +2744,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2762,7 +2762,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
index c016149..cb1d820 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -948,7 +948,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1023,7 +1023,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1046,7 +1046,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1072,7 +1072,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1095,7 +1095,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1121,7 +1121,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1144,7 +1144,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1170,7 +1170,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1193,7 +1193,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1219,7 +1219,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1242,7 +1242,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1268,7 +1268,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1340,7 +1340,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1366,7 +1366,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1389,7 +1389,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1487,7 +1487,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1513,7 +1513,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1562,7 +1562,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1585,7 +1585,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1611,7 +1611,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1634,7 +1634,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1660,7 +1660,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1688,7 +1688,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsleu.vv v0, v25, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1720,7 +1720,7 @@
 ; CHECK-NEXT:    vmsleu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1748,7 +1748,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsleu.vv v0, v26, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1780,7 +1780,7 @@
 ; CHECK-NEXT:    vmsleu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1808,7 +1808,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsleu.vv v0, v28, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1840,7 +1840,7 @@
 ; CHECK-NEXT:    vmsleu.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1857,7 +1857,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1875,7 +1875,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1892,7 +1892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1910,7 +1910,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1927,7 +1927,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1945,7 +1945,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1962,7 +1962,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1980,7 +1980,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1997,7 +1997,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2015,7 +2015,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2032,7 +2032,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2050,7 +2050,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2067,7 +2067,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2085,7 +2085,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2102,7 +2102,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2120,7 +2120,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vv v25, v8, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2137,7 +2137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2155,7 +2155,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2172,7 +2172,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2190,7 +2190,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2207,7 +2207,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2225,7 +2225,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2242,7 +2242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2260,7 +2260,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2277,7 +2277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2295,7 +2295,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2312,7 +2312,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2347,7 +2347,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2365,7 +2365,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2382,7 +2382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 13
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2400,7 +2400,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2417,7 +2417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2435,7 +2435,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2452,7 +2452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2470,7 +2470,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -2489,7 +2489,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -2507,7 +2507,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -2525,7 +2525,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -2543,7 +2543,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -2561,7 +2561,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2579,7 +2579,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2597,7 +2597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2615,7 +2615,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2651,7 +2651,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2669,7 +2669,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2687,7 +2687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2705,7 +2705,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2723,7 +2723,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2741,7 +2741,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2767,7 +2767,7 @@
 ; CHECK-NEXT:    vmsleu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2793,7 +2793,7 @@
 ; CHECK-NEXT:    vmsleu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2819,7 +2819,7 @@
 ; CHECK-NEXT:    vmsleu.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
index 31eaf88..0fb94bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -948,7 +948,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -974,7 +974,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -997,7 +997,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1023,7 +1023,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1046,7 +1046,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1072,7 +1072,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1095,7 +1095,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1121,7 +1121,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1144,7 +1144,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1170,7 +1170,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1193,7 +1193,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1219,7 +1219,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1242,7 +1242,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1268,7 +1268,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1340,7 +1340,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1366,7 +1366,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1389,7 +1389,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1487,7 +1487,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1513,7 +1513,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1562,7 +1562,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1585,7 +1585,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1611,7 +1611,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1634,7 +1634,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1660,7 +1660,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1683,7 +1683,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1732,7 +1732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1758,7 +1758,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmnand.mm v0, v25, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1807,7 +1807,7 @@
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v25, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1824,7 +1824,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1842,7 +1842,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1859,7 +1859,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1877,7 +1877,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1894,7 +1894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1912,7 +1912,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1947,7 +1947,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1964,7 +1964,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1982,7 +1982,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1999,7 +1999,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2017,7 +2017,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2034,7 +2034,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2052,7 +2052,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2069,7 +2069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2087,7 +2087,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmseq.vv v25, v8, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2104,7 +2104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmseq.vv v0, v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2122,7 +2122,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2139,7 +2139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2157,7 +2157,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2174,7 +2174,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2192,7 +2192,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2209,7 +2209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2227,7 +2227,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2244,7 +2244,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2262,7 +2262,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2279,7 +2279,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2297,7 +2297,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2314,7 +2314,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2332,7 +2332,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2349,7 +2349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 13
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2367,7 +2367,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2384,7 +2384,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2402,7 +2402,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2419,7 +2419,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2437,7 +2437,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -2456,7 +2456,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -2474,7 +2474,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -2492,7 +2492,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -2510,7 +2510,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -2528,7 +2528,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2546,7 +2546,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2564,7 +2564,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2582,7 +2582,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2600,7 +2600,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2636,7 +2636,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2654,7 +2654,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2672,7 +2672,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2690,7 +2690,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2708,7 +2708,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2726,7 +2726,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2744,7 +2744,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2762,7 +2762,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0
 ; CHECK-NEXT:    vmandnot.mm v0, v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
index 199605b..5672f26 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmslt.vv v0, v25, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@
 ; CHECK-NEXT:    vmslt.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmslt.vv v0, v26, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@
 ; CHECK-NEXT:    vmslt.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmslt.vv v0, v28, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vmslt.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
index e03678a..18a0e8b9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgt.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgt.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
index 71e499b..cb4d4d8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsltu.vv v0, v25, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@
 ; CHECK-NEXT:    vmsltu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsltu.vv v0, v26, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@
 ; CHECK-NEXT:    vmsltu.vv v25, v26, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsltu.vv v0, v28, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vmsltu.vv v25, v28, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
index fea3c22..092de13 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v9, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v10, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v12, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsgtu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
index 13f93d9..a4d47ce 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
   <vscale x 1 x i1>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -33,7 +33,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -53,7 +53,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -75,7 +75,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -95,7 +95,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -117,7 +117,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -137,7 +137,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -159,7 +159,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -179,7 +179,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -201,7 +201,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -221,7 +221,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -243,7 +243,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -263,7 +263,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -285,7 +285,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
index 3712754..8ca43e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
   <vscale x 1 x i1>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -33,7 +33,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -53,7 +53,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -75,7 +75,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -95,7 +95,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -117,7 +117,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -137,7 +137,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -159,7 +159,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -179,7 +179,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -201,7 +201,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -221,7 +221,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -243,7 +243,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -263,7 +263,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsif.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -285,7 +285,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsif.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
index 74662ab..d043dfb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsle.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@
 ; CHECK-NEXT:    vmsle.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsle.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@
 ; CHECK-NEXT:    vmsle.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsle.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vmsle.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
index 62e5b3b..85e9485 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsle.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
index 5bf609e..ff3bc12 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@
 ; CHECK-NEXT:    vmsleu.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@
 ; CHECK-NEXT:    vmsleu.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vmsleu.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
index 7ac748d..2f6fddf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsleu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
index 8ca44d6..cb488de 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmslt.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@
 ; CHECK-NEXT:    vmslt.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmslt.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@
 ; CHECK-NEXT:    vmslt.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmslt.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vmslt.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
index 6baa263..c105bf0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmslt.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, -1, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 13
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsle.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, 15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsle.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsle.vi v0, v8, -15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsle.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
index 3ccd474..c3bdc22 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@
 ; CHECK-NEXT:    vmsltu.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@
 ; CHECK-NEXT:    vmsltu.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vmsltu.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vv v25, v8, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 13
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
index 85898e4..1f4cd738 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsltu.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -15, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -13, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -11, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -7, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -6
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -5, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -4
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -3, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -2
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vv v25, v8, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 2, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 3
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 4, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 5
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 6, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 7
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 13
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsleu.vi v25, v8, 14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, 15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsleu.vi v0, v8, -15
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsleu.vi v25, v8, -14, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
index b8feac3..f1d4ba5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1658,7 +1658,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmsne.vv v0, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1690,7 +1690,7 @@
 ; CHECK-NEXT:    vmsne.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1718,7 +1718,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmsne.vv v0, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1750,7 +1750,7 @@
 ; CHECK-NEXT:    vmsne.vv v25, v8, v26, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1778,7 +1778,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmsne.vv v0, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vmsne.vv v25, v8, v28, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1827,7 +1827,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1845,7 +1845,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1862,7 +1862,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1880,7 +1880,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1897,7 +1897,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1915,7 +1915,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1932,7 +1932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1950,7 +1950,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1967,7 +1967,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1985,7 +1985,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -2002,7 +2002,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2020,7 +2020,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -2037,7 +2037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2055,7 +2055,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2072,7 +2072,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2090,7 +2090,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2107,7 +2107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2125,7 +2125,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2142,7 +2142,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2160,7 +2160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2177,7 +2177,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2195,7 +2195,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2212,7 +2212,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2230,7 +2230,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2247,7 +2247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2265,7 +2265,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2282,7 +2282,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2300,7 +2300,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2317,7 +2317,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2335,7 +2335,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2352,7 +2352,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2370,7 +2370,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2387,7 +2387,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2405,7 +2405,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2422,7 +2422,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2440,7 +2440,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
index 349daf9..8d8ef76 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -37,7 +37,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
     <vscale x 1 x i8> %1,
@@ -63,7 +63,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -89,7 +89,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
     <vscale x 2 x i8> %1,
@@ -115,7 +115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -141,7 +141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
     <vscale x 4 x i8> %1,
@@ -167,7 +167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
     <vscale x 8 x i8> %1,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -245,7 +245,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
     <vscale x 16 x i8> %1,
@@ -271,7 +271,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
     <vscale x 32 x i8> %1,
@@ -323,7 +323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
     <vscale x 1 x i16> %1,
@@ -375,7 +375,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -401,7 +401,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
     <vscale x 2 x i16> %1,
@@ -427,7 +427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -453,7 +453,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
     <vscale x 4 x i16> %1,
@@ -479,7 +479,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -505,7 +505,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
     <vscale x 8 x i16> %1,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -557,7 +557,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
     <vscale x 16 x i16> %1,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -609,7 +609,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
     <vscale x 1 x i32> %1,
@@ -635,7 +635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -661,7 +661,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
     <vscale x 2 x i32> %1,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -713,7 +713,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
     <vscale x 4 x i32> %1,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -765,7 +765,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
     <vscale x 8 x i32> %1,
@@ -791,7 +791,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v9, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
     <vscale x 1 x i64> %1,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v10, v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
     <vscale x 2 x i64> %1,
@@ -895,7 +895,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vv v0, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -921,7 +921,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmsne.vv v25, v12, v16, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
     <vscale x 4 x i64> %1,
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -972,7 +972,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -994,7 +994,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1066,7 +1066,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1088,7 +1088,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1113,7 +1113,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1135,7 +1135,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1160,7 +1160,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1207,7 +1207,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1229,7 +1229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1301,7 +1301,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1348,7 +1348,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -1370,7 +1370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -1417,7 +1417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -1464,7 +1464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -1558,7 +1558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1583,7 +1583,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -1605,7 +1605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1630,7 +1630,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -1652,7 +1652,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1677,7 +1677,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -1699,7 +1699,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1724,7 +1724,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -1746,7 +1746,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vx v0, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1771,7 +1771,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vx v25, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
@@ -1788,7 +1788,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
     <vscale x 1 x i1> %0,
@@ -1823,7 +1823,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
     <vscale x 2 x i1> %0,
@@ -1858,7 +1858,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1876,7 +1876,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
     <vscale x 4 x i1> %0,
@@ -1893,7 +1893,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1911,7 +1911,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
     <vscale x 8 x i1> %0,
@@ -1928,7 +1928,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
     <vscale x 16 x i1> %0,
@@ -1963,7 +1963,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
     <vscale x 32 x i1> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2016,7 +2016,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
     <vscale x 1 x i1> %0,
@@ -2033,7 +2033,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2051,7 +2051,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
     <vscale x 2 x i1> %0,
@@ -2068,7 +2068,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2086,7 +2086,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
     <vscale x 4 x i1> %0,
@@ -2103,7 +2103,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
     <vscale x 8 x i1> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2156,7 +2156,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
     <vscale x 16 x i1> %0,
@@ -2173,7 +2173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2191,7 +2191,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
     <vscale x 1 x i1> %0,
@@ -2208,7 +2208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2226,7 +2226,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
     <vscale x 2 x i1> %0,
@@ -2243,7 +2243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2261,7 +2261,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
     <vscale x 4 x i1> %0,
@@ -2278,7 +2278,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
     <vscale x 8 x i1> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2331,7 +2331,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
     <vscale x 1 x i1> %0,
@@ -2348,7 +2348,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2366,7 +2366,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
     <vscale x 2 x i1> %0,
@@ -2383,7 +2383,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmsne.vi v0, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2401,7 +2401,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vmsne.vi v25, v8, 9, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
     <vscale x 4 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
index e0ee3a5..9a3a64d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
   <vscale x 1 x i1>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -33,7 +33,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -53,7 +53,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -75,7 +75,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -95,7 +95,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -117,7 +117,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -137,7 +137,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -159,7 +159,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -179,7 +179,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -201,7 +201,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -221,7 +221,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -243,7 +243,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -263,7 +263,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -285,7 +285,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
index bd07801..07e1b3c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
   <vscale x 1 x i1>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -33,7 +33,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -53,7 +53,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -75,7 +75,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -95,7 +95,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -117,7 +117,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -137,7 +137,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -159,7 +159,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -179,7 +179,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -201,7 +201,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -221,7 +221,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -243,7 +243,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -263,7 +263,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmsof.m v25, v0
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -285,7 +285,7 @@
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmsof.m v25, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
index 512d8d4..155aba5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmul.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmul.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmul.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
index 78f2e63..74a9670 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
index 98e4349..c183192 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmulh.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmulh.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmulh.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
index 84ff584..3665d9a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulh.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
index 6cbd01d..094866d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
index ef8fc76..1f99965 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
index e53fa4c..1c445f8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
index 9bfe1fc..4d2eeb5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vmulhu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll
index ab714c9..cef71e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
   <vscale x 1 x i8>,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -28,7 +28,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -46,7 +46,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -64,7 +64,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -82,7 +82,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -118,7 +118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -136,7 +136,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -172,7 +172,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -208,7 +208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -226,7 +226,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -244,7 +244,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -262,7 +262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -280,7 +280,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -316,7 +316,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -352,7 +352,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -388,7 +388,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -406,7 +406,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vmv.v.v.nxv1f16(
     <vscale x 1 x half> %0,
@@ -424,7 +424,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vmv.v.v.nxv2f16(
     <vscale x 2 x half> %0,
@@ -442,7 +442,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16(
     <vscale x 4 x half> %0,
@@ -460,7 +460,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vmv.v.v.nxv8f16(
     <vscale x 8 x half> %0,
@@ -478,7 +478,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vmv.v.v.nxv16f16(
     <vscale x 16 x half> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vmv.v.v.nxv32f16(
     <vscale x 32 x half> %0,
@@ -514,7 +514,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
     <vscale x 1 x float> %0,
@@ -532,7 +532,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32(
     <vscale x 2 x float> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32(
     <vscale x 4 x float> %0,
@@ -568,7 +568,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32(
     <vscale x 8 x float> %0,
@@ -586,7 +586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32(
     <vscale x 16 x float> %0,
@@ -604,7 +604,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64(
     <vscale x 1 x double> %0,
@@ -622,7 +622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64(
     <vscale x 2 x double> %0,
@@ -640,7 +640,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64(
     <vscale x 4 x double> %0,
@@ -658,7 +658,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll
index e0ab49f..8573f05 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
   <vscale x 1 x i8>,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -28,7 +28,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -46,7 +46,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -64,7 +64,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -82,7 +82,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -118,7 +118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -136,7 +136,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -172,7 +172,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -208,7 +208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -226,7 +226,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -244,7 +244,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -262,7 +262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -280,7 +280,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -316,7 +316,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -352,7 +352,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -388,7 +388,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -406,7 +406,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vmv.v.v.nxv1f16(
     <vscale x 1 x half> %0,
@@ -424,7 +424,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vmv.v.v.nxv2f16(
     <vscale x 2 x half> %0,
@@ -442,7 +442,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16(
     <vscale x 4 x half> %0,
@@ -460,7 +460,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vmv.v.v.nxv8f16(
     <vscale x 8 x half> %0,
@@ -478,7 +478,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vmv.v.v.nxv16f16(
     <vscale x 16 x half> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vmv.v.v.nxv32f16(
     <vscale x 32 x half> %0,
@@ -514,7 +514,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
     <vscale x 1 x float> %0,
@@ -532,7 +532,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32(
     <vscale x 2 x float> %0,
@@ -550,7 +550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32(
     <vscale x 4 x float> %0,
@@ -568,7 +568,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32(
     <vscale x 8 x float> %0,
@@ -586,7 +586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32(
     <vscale x 16 x float> %0,
@@ -604,7 +604,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64(
     <vscale x 1 x double> %0,
@@ -622,7 +622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64(
     <vscale x 2 x double> %0,
@@ -640,7 +640,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64(
     <vscale x 4 x double> %0,
@@ -658,7 +658,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.v v8, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
index a0287dc..94d6c28 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
   i8,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
     i8 %0,
@@ -28,7 +28,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
     i8 %0,
@@ -46,7 +46,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
     i8 %0,
@@ -64,7 +64,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
     i8 %0,
@@ -82,7 +82,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
     i8 %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
     i8 %0,
@@ -118,7 +118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
     i8 %0,
@@ -136,7 +136,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
     i16 %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
     i16 %0,
@@ -172,7 +172,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
     i16 %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
     i16 %0,
@@ -208,7 +208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
     i16 %0,
@@ -226,7 +226,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
     i16 %0,
@@ -244,7 +244,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
     i32 %0,
@@ -262,7 +262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
     i32 %0,
@@ -280,7 +280,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
     i32 %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
     i32 %0,
@@ -316,7 +316,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
     i32 %0,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v8, (a0), zero
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
     i64 %0,
@@ -362,7 +362,7 @@
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v8, (a0), zero
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
     i64 %0,
@@ -385,7 +385,7 @@
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v8, (a0), zero
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
     i64 %0,
@@ -408,7 +408,7 @@
 ; CHECK-NEXT:    addi a0, sp, 8
 ; CHECK-NEXT:    vlse64.v v8, (a0), zero
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
     i64 %0,
@@ -422,7 +422,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
     i8 9,
@@ -436,7 +436,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
     i8 9,
@@ -450,7 +450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
     i8 9,
@@ -464,7 +464,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
     i8 9,
@@ -478,7 +478,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
     i8 9,
@@ -492,7 +492,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
     i8 9,
@@ -506,7 +506,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
     i8 9,
@@ -520,7 +520,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
     i16 9,
@@ -534,7 +534,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
     i16 9,
@@ -548,7 +548,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
     i16 9,
@@ -562,7 +562,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
     i16 9,
@@ -576,7 +576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
     i16 9,
@@ -590,7 +590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
     i16 9,
@@ -604,7 +604,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
     i32 9,
@@ -618,7 +618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
     i32 9,
@@ -632,7 +632,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
     i32 9,
@@ -646,7 +646,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
     i32 9,
@@ -660,7 +660,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
     i32 9,
@@ -674,7 +674,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
     i64 9,
@@ -688,7 +688,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
     i64 9,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
     i64 9,
@@ -716,7 +716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
     i64 9,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll
index 52acf99..c3a757b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
   i8,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
     i8 %0,
@@ -28,7 +28,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
     i8 %0,
@@ -46,7 +46,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
     i8 %0,
@@ -64,7 +64,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
     i8 %0,
@@ -82,7 +82,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
     i8 %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
     i8 %0,
@@ -118,7 +118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
     i8 %0,
@@ -136,7 +136,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
     i16 %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
     i16 %0,
@@ -172,7 +172,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
     i16 %0,
@@ -190,7 +190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
     i16 %0,
@@ -208,7 +208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
     i16 %0,
@@ -226,7 +226,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
     i16 %0,
@@ -244,7 +244,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
     i32 %0,
@@ -262,7 +262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
     i32 %0,
@@ -280,7 +280,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
     i32 %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
     i32 %0,
@@ -316,7 +316,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
     i32 %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
     i64 %0,
@@ -352,7 +352,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
     i64 %0,
@@ -370,7 +370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
     i64 %0,
@@ -388,7 +388,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
     i64 %0,
@@ -402,7 +402,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
     i8 9,
@@ -416,7 +416,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
     i8 9,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
     i8 9,
@@ -444,7 +444,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
     i8 9,
@@ -458,7 +458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
     i8 9,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
     i8 9,
@@ -486,7 +486,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
     i8 9,
@@ -500,7 +500,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
     i16 9,
@@ -514,7 +514,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
     i16 9,
@@ -528,7 +528,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
     i16 9,
@@ -542,7 +542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
     i16 9,
@@ -556,7 +556,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
     i16 9,
@@ -570,7 +570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
     i16 9,
@@ -584,7 +584,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
     i32 9,
@@ -598,7 +598,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
     i32 9,
@@ -612,7 +612,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
     i32 9,
@@ -626,7 +626,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
     i32 9,
@@ -640,7 +640,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
     i32 9,
@@ -654,7 +654,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
     i64 9,
@@ -668,7 +668,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
     i64 9,
@@ -682,7 +682,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
     i64 9,
@@ -696,7 +696,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vmv.v.i v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
     i64 9,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll
index 7916e46..3a0de6b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll
index 4ced869..025e1f2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmxnor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll
index cffa7ad..8155da1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll
index e4c8895..7c9b704 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -51,7 +51,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
index 31af2f8..edd37e4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
index 9a9644e..1972f87 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclip.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclip.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclip.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
index d582ba8..fd8e9e2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
index fd21d8b..5c66622 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnclipu.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
index 5b307ca..c846217 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1537,7 +1537,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v25, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1567,7 +1567,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v25, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v26, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1627,7 +1627,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v26, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1657,7 +1657,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v28, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v28, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
index 5a37fdb..fc9d906 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1530,7 +1530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1553,7 +1553,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1576,7 +1576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1622,7 +1622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1645,7 +1645,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
index d19137d..4cad98ea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1537,7 +1537,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v25, v9
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1567,7 +1567,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v25, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1597,7 +1597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v26, v10
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1627,7 +1627,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v26, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1657,7 +1657,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v28, v12
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v28, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
index 596092e..6bd5924 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1392,7 +1392,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1484,7 +1484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1530,7 +1530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1553,7 +1553,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1576,7 +1576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1622,7 +1622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1645,7 +1645,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vnmsub.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
index 71272f23..33ff811 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
index 30f11f8..94665ff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsra.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsra.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsra.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
index 635c0b2..2840e7e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
index d049ca6..55b4f93 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1372,7 +1372,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -1390,7 +1390,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1405,7 +1405,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -1456,7 +1456,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1471,7 +1471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1504,7 +1504,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -1522,7 +1522,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1537,7 +1537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1570,7 +1570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1588,7 +1588,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1603,7 +1603,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1636,7 +1636,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1654,7 +1654,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1669,7 +1669,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1702,7 +1702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1720,7 +1720,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1735,7 +1735,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1768,7 +1768,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -1786,7 +1786,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1801,7 +1801,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vnsrl.wi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1834,7 +1834,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vnsrl.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
index fc0d3b5..a45736c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vor.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vor.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vor.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vor.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2013,7 +2013,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2030,7 +2030,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2045,7 +2045,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2062,7 +2062,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2077,7 +2077,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2094,7 +2094,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2109,7 +2109,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2126,7 +2126,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2141,7 +2141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2158,7 +2158,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2173,7 +2173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2190,7 +2190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2205,7 +2205,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2222,7 +2222,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2237,7 +2237,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2254,7 +2254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2269,7 +2269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2286,7 +2286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2301,7 +2301,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2318,7 +2318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2333,7 +2333,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2350,7 +2350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2365,7 +2365,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2382,7 +2382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2397,7 +2397,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2414,7 +2414,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2429,7 +2429,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2446,7 +2446,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2461,7 +2461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2478,7 +2478,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2493,7 +2493,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2510,7 +2510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2525,7 +2525,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2542,7 +2542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2557,7 +2557,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2574,7 +2574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2589,7 +2589,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2606,7 +2606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2621,7 +2621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2638,7 +2638,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2653,7 +2653,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2670,7 +2670,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2685,7 +2685,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
index 980191a..8b4a3d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll
index 48200b3..1350c34 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare i32 @llvm.riscv.vpopc.i32.nxv1i1(
   <vscale x 1 x i1>,
   i32);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.i32.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.i32.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.i32.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.i32.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -151,7 +151,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.i32.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -191,7 +191,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.i32.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -231,7 +231,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.i32.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -271,7 +271,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vpopc-rv64.ll
index d33787c8..6e0bae3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpopc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare i64 @llvm.riscv.vpopc.i64.nxv1i1(
   <vscale x 1 x i1>,
   i64);
@@ -10,7 +10,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.i64.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -31,7 +31,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv1i1(
     <vscale x 1 x i1> %0,
@@ -50,7 +50,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.i64.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -71,7 +71,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv2i1(
     <vscale x 2 x i1> %0,
@@ -90,7 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.i64.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -111,7 +111,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv4i1(
     <vscale x 4 x i1> %0,
@@ -130,7 +130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.i64.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -151,7 +151,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv8i1(
     <vscale x 8 x i1> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.i64.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -191,7 +191,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv16i1(
     <vscale x 16 x i1> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.i64.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -231,7 +231,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv32i1(
     <vscale x 32 x i1> %0,
@@ -250,7 +250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vpopc.m a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.i64.nxv64i1(
     <vscale x 64 x i1> %0,
@@ -271,7 +271,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vpopc.m a0, v25, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv64i1(
     <vscale x 64 x i1> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
index 9f00209..d70f856 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
index 387bedd..ca22b16 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
index 02ba0ab..6f02cea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
index fe56210..43fad32 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmax.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
index b3e6656..efe85ed 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
index 034f630..9c2b918 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
index 866548f..cec0117 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
index 127e6f5..acaf4e2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredmin.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
index 3d142c3..d9038aa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
index 89d14e9..f08fd12 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredminu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
index 9c90c0e..6d5873a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
index 8fbaf45..de3f346 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
index 2fb704b..99928f8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
index 088f249..3a92a1e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
index 59bc5cd..282a3d9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
index aa32c7a..2c5d826 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vredxor.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
index e416b73..3222405 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vrem.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vrem.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vrem.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
index fbce5aa..90e3c52 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrem.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrem.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vrem.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrem.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
index 3ef4717..a26bcfb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vremu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vremu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vremu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
index 57f4952..42bf90f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vremu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vremu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vremu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vremu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
index fe9aa32..d6c1c803 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i32(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i32(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i32(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i32(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i32(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i32(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i32(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i32(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i32(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i32(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i32(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i32(
     <vscale x 64 x i8> %0,
@@ -302,10 +302,10 @@
 define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i32(
     <vscale x 64 x i8> %0,
@@ -328,7 +328,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i32(
     <vscale x 1 x i16> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i32(
     <vscale x 1 x i16> %0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i32(
     <vscale x 2 x i16> %0,
@@ -395,7 +395,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i32(
     <vscale x 2 x i16> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i32(
     <vscale x 4 x i16> %0,
@@ -440,7 +440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i32(
     <vscale x 4 x i16> %0,
@@ -463,7 +463,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i32(
     <vscale x 8 x i16> %0,
@@ -485,7 +485,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i32(
     <vscale x 8 x i16> %0,
@@ -508,7 +508,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i32(
     <vscale x 16 x i16> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i32(
     <vscale x 16 x i16> %0,
@@ -553,7 +553,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i32(
     <vscale x 32 x i16> %0,
@@ -576,7 +576,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i32(
     <vscale x 32 x i16> %0,
@@ -599,7 +599,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -644,7 +644,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -666,7 +666,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -689,7 +689,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -756,7 +756,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -779,7 +779,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -802,7 +802,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -825,7 +825,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i32(
     <vscale x 1 x half> %0,
@@ -847,7 +847,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.i32(
     <vscale x 1 x half> %0,
@@ -870,7 +870,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i32(
     <vscale x 2 x half> %0,
@@ -892,7 +892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.i32(
     <vscale x 2 x half> %0,
@@ -915,7 +915,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i32(
     <vscale x 4 x half> %0,
@@ -937,7 +937,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.i32(
     <vscale x 4 x half> %0,
@@ -960,7 +960,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i32(
     <vscale x 8 x half> %0,
@@ -982,7 +982,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.i32(
     <vscale x 8 x half> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i32(
     <vscale x 16 x half> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.i32(
     <vscale x 16 x half> %0,
@@ -1050,7 +1050,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i32(
     <vscale x 32 x half> %0,
@@ -1073,7 +1073,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.i32(
     <vscale x 32 x half> %0,
@@ -1096,7 +1096,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i32(
     <vscale x 1 x float> %0,
@@ -1118,7 +1118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i32(
     <vscale x 1 x float> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i32(
     <vscale x 2 x float> %0,
@@ -1163,7 +1163,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i32(
     <vscale x 2 x float> %0,
@@ -1186,7 +1186,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i32(
     <vscale x 4 x float> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i32(
     <vscale x 4 x float> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i32(
     <vscale x 8 x float> %0,
@@ -1253,7 +1253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i32(
     <vscale x 8 x float> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i32(
     <vscale x 16 x float> %0,
@@ -1299,7 +1299,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i32(
     <vscale x 16 x float> %0,
@@ -1322,7 +1322,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i32(
     <vscale x 1 x double> %0,
@@ -1344,7 +1344,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i32(
     <vscale x 1 x double> %0,
@@ -1367,7 +1367,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i32(
     <vscale x 2 x double> %0,
@@ -1389,7 +1389,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i32(
     <vscale x 2 x double> %0,
@@ -1412,7 +1412,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i32(
     <vscale x 4 x double> %0,
@@ -1434,7 +1434,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i32(
     <vscale x 4 x double> %0,
@@ -1457,7 +1457,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i32(
     <vscale x 8 x double> %0,
@@ -1480,7 +1480,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i32(
     <vscale x 8 x double> %0,
@@ -1503,7 +1503,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(
     <vscale x 1 x i8> %0,
@@ -1525,7 +1525,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i32(
     <vscale x 1 x i8> %0,
@@ -1548,7 +1548,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(
     <vscale x 2 x i8> %0,
@@ -1570,7 +1570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i32(
     <vscale x 2 x i8> %0,
@@ -1593,7 +1593,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(
     <vscale x 4 x i8> %0,
@@ -1615,7 +1615,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i32(
     <vscale x 4 x i8> %0,
@@ -1638,7 +1638,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(
     <vscale x 8 x i8> %0,
@@ -1660,7 +1660,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i32(
     <vscale x 8 x i8> %0,
@@ -1683,7 +1683,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(
     <vscale x 16 x i8> %0,
@@ -1705,7 +1705,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i32(
     <vscale x 16 x i8> %0,
@@ -1728,7 +1728,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(
     <vscale x 32 x i8> %0,
@@ -1750,7 +1750,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i32(
     <vscale x 32 x i8> %0,
@@ -1773,7 +1773,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(
     <vscale x 64 x i8> %0,
@@ -1795,7 +1795,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i32(
     <vscale x 64 x i8> %0,
@@ -1818,7 +1818,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(
     <vscale x 1 x i16> %0,
@@ -1840,7 +1840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i32(
     <vscale x 1 x i16> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(
     <vscale x 2 x i16> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i32(
     <vscale x 2 x i16> %0,
@@ -1908,7 +1908,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(
     <vscale x 4 x i16> %0,
@@ -1930,7 +1930,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i32(
     <vscale x 4 x i16> %0,
@@ -1953,7 +1953,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(
     <vscale x 8 x i16> %0,
@@ -1975,7 +1975,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i32(
     <vscale x 8 x i16> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(
     <vscale x 16 x i16> %0,
@@ -2020,7 +2020,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i32(
     <vscale x 16 x i16> %0,
@@ -2043,7 +2043,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(
     <vscale x 32 x i16> %0,
@@ -2065,7 +2065,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i32(
     <vscale x 32 x i16> %0,
@@ -2088,7 +2088,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2110,7 +2110,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2133,7 +2133,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2155,7 +2155,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2178,7 +2178,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2200,7 +2200,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2223,7 +2223,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2245,7 +2245,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2268,7 +2268,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2290,7 +2290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i32(
     <vscale x 1 x half> %0,
@@ -2335,7 +2335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i32(
     <vscale x 1 x half> %0,
@@ -2358,7 +2358,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i32(
     <vscale x 2 x half> %0,
@@ -2380,7 +2380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i32(
     <vscale x 2 x half> %0,
@@ -2403,7 +2403,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i32(
     <vscale x 4 x half> %0,
@@ -2425,7 +2425,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i32(
     <vscale x 4 x half> %0,
@@ -2448,7 +2448,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i32(
     <vscale x 8 x half> %0,
@@ -2470,7 +2470,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i32(
     <vscale x 8 x half> %0,
@@ -2493,7 +2493,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i32(
     <vscale x 16 x half> %0,
@@ -2515,7 +2515,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i32(
     <vscale x 16 x half> %0,
@@ -2538,7 +2538,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i32(
     <vscale x 32 x half> %0,
@@ -2560,7 +2560,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i32(
     <vscale x 32 x half> %0,
@@ -2583,7 +2583,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i32(
     <vscale x 1 x float> %0,
@@ -2605,7 +2605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i32(
     <vscale x 1 x float> %0,
@@ -2628,7 +2628,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i32(
     <vscale x 2 x float> %0,
@@ -2650,7 +2650,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i32(
     <vscale x 2 x float> %0,
@@ -2673,7 +2673,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i32(
     <vscale x 4 x float> %0,
@@ -2695,7 +2695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i32(
     <vscale x 4 x float> %0,
@@ -2718,7 +2718,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i32(
     <vscale x 8 x float> %0,
@@ -2740,7 +2740,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i32(
     <vscale x 8 x float> %0,
@@ -2763,7 +2763,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i32(
     <vscale x 16 x float> %0,
@@ -2785,7 +2785,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i32(
     <vscale x 16 x float> %0,
@@ -2808,7 +2808,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i32(
     <vscale x 1 x double> %0,
@@ -2830,7 +2830,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i32(
     <vscale x 1 x double> %0,
@@ -2853,7 +2853,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i32(
     <vscale x 2 x double> %0,
@@ -2875,7 +2875,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i32(
     <vscale x 2 x double> %0,
@@ -2898,7 +2898,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i32(
     <vscale x 4 x double> %0,
@@ -2920,7 +2920,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i32(
     <vscale x 4 x double> %0,
@@ -2943,7 +2943,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i32(
     <vscale x 8 x double> %0,
@@ -2965,7 +2965,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i32(
     <vscale x 8 x double> %0,
@@ -2983,7 +2983,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(
     <vscale x 1 x i8> %0,
@@ -2998,7 +2998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i32(
     <vscale x 1 x i8> %0,
@@ -3016,7 +3016,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(
     <vscale x 2 x i8> %0,
@@ -3031,7 +3031,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i32(
     <vscale x 2 x i8> %0,
@@ -3049,7 +3049,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(
     <vscale x 4 x i8> %0,
@@ -3064,7 +3064,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i32(
     <vscale x 4 x i8> %0,
@@ -3082,7 +3082,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(
     <vscale x 8 x i8> %0,
@@ -3097,7 +3097,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i32(
     <vscale x 8 x i8> %0,
@@ -3115,7 +3115,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(
     <vscale x 16 x i8> %0,
@@ -3130,7 +3130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i32(
     <vscale x 16 x i8> %0,
@@ -3148,7 +3148,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(
     <vscale x 32 x i8> %0,
@@ -3163,7 +3163,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i32(
     <vscale x 32 x i8> %0,
@@ -3181,7 +3181,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(
     <vscale x 64 x i8> %0,
@@ -3196,7 +3196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i32(
     <vscale x 64 x i8> %0,
@@ -3214,7 +3214,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(
     <vscale x 1 x i16> %0,
@@ -3229,7 +3229,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i32(
     <vscale x 1 x i16> %0,
@@ -3247,7 +3247,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(
     <vscale x 2 x i16> %0,
@@ -3262,7 +3262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i32(
     <vscale x 2 x i16> %0,
@@ -3280,7 +3280,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(
     <vscale x 4 x i16> %0,
@@ -3295,7 +3295,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i32(
     <vscale x 4 x i16> %0,
@@ -3313,7 +3313,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(
     <vscale x 8 x i16> %0,
@@ -3328,7 +3328,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i32(
     <vscale x 8 x i16> %0,
@@ -3346,7 +3346,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(
     <vscale x 16 x i16> %0,
@@ -3361,7 +3361,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i32(
     <vscale x 16 x i16> %0,
@@ -3379,7 +3379,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(
     <vscale x 32 x i16> %0,
@@ -3394,7 +3394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i32(
     <vscale x 32 x i16> %0,
@@ -3412,7 +3412,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -3427,7 +3427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -3445,7 +3445,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -3460,7 +3460,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -3478,7 +3478,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -3493,7 +3493,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -3511,7 +3511,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -3526,7 +3526,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -3544,7 +3544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -3559,7 +3559,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -3577,7 +3577,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i32(
     <vscale x 1 x half> %0,
@@ -3592,7 +3592,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i32(
     <vscale x 1 x half> %0,
@@ -3610,7 +3610,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i32(
     <vscale x 2 x half> %0,
@@ -3625,7 +3625,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i32(
     <vscale x 2 x half> %0,
@@ -3643,7 +3643,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i32(
     <vscale x 4 x half> %0,
@@ -3658,7 +3658,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i32(
     <vscale x 4 x half> %0,
@@ -3676,7 +3676,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i32(
     <vscale x 8 x half> %0,
@@ -3691,7 +3691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i32(
     <vscale x 8 x half> %0,
@@ -3709,7 +3709,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i32(
     <vscale x 16 x half> %0,
@@ -3724,7 +3724,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i32(
     <vscale x 16 x half> %0,
@@ -3742,7 +3742,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i32(
     <vscale x 32 x half> %0,
@@ -3757,7 +3757,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i32(
     <vscale x 32 x half> %0,
@@ -3775,7 +3775,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i32(
     <vscale x 1 x float> %0,
@@ -3790,7 +3790,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i32(
     <vscale x 1 x float> %0,
@@ -3808,7 +3808,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i32(
     <vscale x 2 x float> %0,
@@ -3823,7 +3823,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i32(
     <vscale x 2 x float> %0,
@@ -3841,7 +3841,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i32(
     <vscale x 4 x float> %0,
@@ -3856,7 +3856,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i32(
     <vscale x 4 x float> %0,
@@ -3874,7 +3874,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i32(
     <vscale x 8 x float> %0,
@@ -3889,7 +3889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i32(
     <vscale x 8 x float> %0,
@@ -3907,7 +3907,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i32(
     <vscale x 16 x float> %0,
@@ -3922,7 +3922,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i32(
     <vscale x 16 x float> %0,
@@ -3940,7 +3940,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i32(
     <vscale x 1 x double> %0,
@@ -3955,7 +3955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i32(
     <vscale x 1 x double> %0,
@@ -3973,7 +3973,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i32(
     <vscale x 2 x double> %0,
@@ -3988,7 +3988,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i32(
     <vscale x 2 x double> %0,
@@ -4006,7 +4006,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i32(
     <vscale x 4 x double> %0,
@@ -4021,7 +4021,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i32(
     <vscale x 4 x double> %0,
@@ -4039,7 +4039,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i32(
     <vscale x 8 x double> %0,
@@ -4054,7 +4054,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i32(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
index 963f795..e28ce70 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i64(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i64(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i64(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i64(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i64(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i64(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(
     <vscale x 64 x i8> %0,
@@ -302,10 +302,10 @@
 define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i64(
     <vscale x 64 x i8> %0,
@@ -328,7 +328,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(
     <vscale x 1 x i16> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i64(
     <vscale x 1 x i16> %0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(
     <vscale x 2 x i16> %0,
@@ -395,7 +395,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i64(
     <vscale x 2 x i16> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(
     <vscale x 4 x i16> %0,
@@ -440,7 +440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i64(
     <vscale x 4 x i16> %0,
@@ -463,7 +463,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(
     <vscale x 8 x i16> %0,
@@ -485,7 +485,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i64(
     <vscale x 8 x i16> %0,
@@ -508,7 +508,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(
     <vscale x 16 x i16> %0,
@@ -530,7 +530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(
     <vscale x 16 x i16> %0,
@@ -553,7 +553,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(
     <vscale x 32 x i16> %0,
@@ -576,7 +576,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i64(
     <vscale x 32 x i16> %0,
@@ -599,7 +599,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(
     <vscale x 1 x i32> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(
     <vscale x 1 x i32> %0,
@@ -644,7 +644,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(
     <vscale x 2 x i32> %0,
@@ -666,7 +666,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i64(
     <vscale x 2 x i32> %0,
@@ -689,7 +689,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(
     <vscale x 4 x i32> %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i64(
     <vscale x 4 x i32> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(
     <vscale x 8 x i32> %0,
@@ -756,7 +756,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i64(
     <vscale x 8 x i32> %0,
@@ -779,7 +779,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(
     <vscale x 16 x i32> %0,
@@ -802,7 +802,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i64(
     <vscale x 16 x i32> %0,
@@ -825,7 +825,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -847,7 +847,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -870,7 +870,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -892,7 +892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -915,7 +915,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -937,7 +937,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -960,7 +960,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1006,7 +1006,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i64(
     <vscale x 1 x half> %0,
@@ -1028,7 +1028,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.i64(
     <vscale x 1 x half> %0,
@@ -1051,7 +1051,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i64(
     <vscale x 2 x half> %0,
@@ -1073,7 +1073,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.i64(
     <vscale x 2 x half> %0,
@@ -1096,7 +1096,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i64(
     <vscale x 4 x half> %0,
@@ -1118,7 +1118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.i64(
     <vscale x 4 x half> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i64(
     <vscale x 8 x half> %0,
@@ -1163,7 +1163,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.i64(
     <vscale x 8 x half> %0,
@@ -1186,7 +1186,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i64(
     <vscale x 16 x half> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.i64(
     <vscale x 16 x half> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i64(
     <vscale x 32 x half> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.i64(
     <vscale x 32 x half> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(
     <vscale x 1 x float> %0,
@@ -1299,7 +1299,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i64(
     <vscale x 1 x float> %0,
@@ -1322,7 +1322,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(
     <vscale x 2 x float> %0,
@@ -1344,7 +1344,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i64(
     <vscale x 2 x float> %0,
@@ -1367,7 +1367,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(
     <vscale x 4 x float> %0,
@@ -1389,7 +1389,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i64(
     <vscale x 4 x float> %0,
@@ -1412,7 +1412,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(
     <vscale x 8 x float> %0,
@@ -1434,7 +1434,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i64(
     <vscale x 8 x float> %0,
@@ -1457,7 +1457,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(
     <vscale x 16 x float> %0,
@@ -1480,7 +1480,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i64(
     <vscale x 16 x float> %0,
@@ -1503,7 +1503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(
     <vscale x 1 x double> %0,
@@ -1525,7 +1525,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i64(
     <vscale x 1 x double> %0,
@@ -1548,7 +1548,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(
     <vscale x 2 x double> %0,
@@ -1570,7 +1570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i64(
     <vscale x 2 x double> %0,
@@ -1593,7 +1593,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(
     <vscale x 4 x double> %0,
@@ -1615,7 +1615,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i64(
     <vscale x 4 x double> %0,
@@ -1638,7 +1638,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(
     <vscale x 8 x double> %0,
@@ -1661,7 +1661,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i64(
     <vscale x 8 x double> %0,
@@ -1684,7 +1684,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
     <vscale x 1 x i8> %0,
@@ -1706,7 +1706,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(
     <vscale x 1 x i8> %0,
@@ -1729,7 +1729,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(
     <vscale x 2 x i8> %0,
@@ -1751,7 +1751,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(
     <vscale x 2 x i8> %0,
@@ -1774,7 +1774,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(
     <vscale x 4 x i8> %0,
@@ -1796,7 +1796,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(
     <vscale x 4 x i8> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(
     <vscale x 8 x i8> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(
     <vscale x 8 x i8> %0,
@@ -1864,7 +1864,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(
     <vscale x 16 x i8> %0,
@@ -1886,7 +1886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(
     <vscale x 16 x i8> %0,
@@ -1909,7 +1909,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(
     <vscale x 32 x i8> %0,
@@ -1931,7 +1931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(
     <vscale x 32 x i8> %0,
@@ -1954,7 +1954,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(
     <vscale x 64 x i8> %0,
@@ -1976,7 +1976,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(
     <vscale x 64 x i8> %0,
@@ -1999,7 +1999,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(
     <vscale x 1 x i16> %0,
@@ -2021,7 +2021,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(
     <vscale x 1 x i16> %0,
@@ -2044,7 +2044,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(
     <vscale x 2 x i16> %0,
@@ -2066,7 +2066,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(
     <vscale x 2 x i16> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(
     <vscale x 4 x i16> %0,
@@ -2111,7 +2111,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(
     <vscale x 4 x i16> %0,
@@ -2134,7 +2134,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(
     <vscale x 8 x i16> %0,
@@ -2156,7 +2156,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(
     <vscale x 8 x i16> %0,
@@ -2179,7 +2179,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(
     <vscale x 16 x i16> %0,
@@ -2201,7 +2201,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(
     <vscale x 16 x i16> %0,
@@ -2224,7 +2224,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(
     <vscale x 32 x i16> %0,
@@ -2246,7 +2246,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(
     <vscale x 32 x i16> %0,
@@ -2269,7 +2269,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(
     <vscale x 1 x i32> %0,
@@ -2291,7 +2291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(
     <vscale x 1 x i32> %0,
@@ -2314,7 +2314,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
     <vscale x 2 x i32> %0,
@@ -2336,7 +2336,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(
     <vscale x 2 x i32> %0,
@@ -2359,7 +2359,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(
     <vscale x 4 x i32> %0,
@@ -2381,7 +2381,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(
     <vscale x 4 x i32> %0,
@@ -2404,7 +2404,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(
     <vscale x 8 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(
     <vscale x 8 x i32> %0,
@@ -2449,7 +2449,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(
     <vscale x 16 x i32> %0,
@@ -2471,7 +2471,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(
     <vscale x 16 x i32> %0,
@@ -2494,7 +2494,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2516,7 +2516,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2539,7 +2539,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2561,7 +2561,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2584,7 +2584,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2606,7 +2606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2629,7 +2629,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2651,7 +2651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2674,7 +2674,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(
     <vscale x 1 x half> %0,
@@ -2696,7 +2696,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i64(
     <vscale x 1 x half> %0,
@@ -2719,7 +2719,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(
     <vscale x 2 x half> %0,
@@ -2741,7 +2741,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i64(
     <vscale x 2 x half> %0,
@@ -2764,7 +2764,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(
     <vscale x 4 x half> %0,
@@ -2786,7 +2786,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i64(
     <vscale x 4 x half> %0,
@@ -2809,7 +2809,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(
     <vscale x 8 x half> %0,
@@ -2831,7 +2831,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i64(
     <vscale x 8 x half> %0,
@@ -2854,7 +2854,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(
     <vscale x 16 x half> %0,
@@ -2876,7 +2876,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i64(
     <vscale x 16 x half> %0,
@@ -2899,7 +2899,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(
     <vscale x 32 x half> %0,
@@ -2921,7 +2921,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i64(
     <vscale x 32 x half> %0,
@@ -2944,7 +2944,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(
     <vscale x 1 x float> %0,
@@ -2966,7 +2966,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(
     <vscale x 1 x float> %0,
@@ -2989,7 +2989,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(
     <vscale x 2 x float> %0,
@@ -3011,7 +3011,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i64(
     <vscale x 2 x float> %0,
@@ -3034,7 +3034,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(
     <vscale x 4 x float> %0,
@@ -3056,7 +3056,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i64(
     <vscale x 4 x float> %0,
@@ -3079,7 +3079,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(
     <vscale x 8 x float> %0,
@@ -3101,7 +3101,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i64(
     <vscale x 8 x float> %0,
@@ -3124,7 +3124,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(
     <vscale x 16 x float> %0,
@@ -3146,7 +3146,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i64(
     <vscale x 16 x float> %0,
@@ -3169,7 +3169,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(
     <vscale x 1 x double> %0,
@@ -3191,7 +3191,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i64(
     <vscale x 1 x double> %0,
@@ -3214,7 +3214,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(
     <vscale x 2 x double> %0,
@@ -3236,7 +3236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i64(
     <vscale x 2 x double> %0,
@@ -3259,7 +3259,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(
     <vscale x 4 x double> %0,
@@ -3281,7 +3281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i64(
     <vscale x 4 x double> %0,
@@ -3304,7 +3304,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(
     <vscale x 8 x double> %0,
@@ -3326,7 +3326,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i64(
     <vscale x 8 x double> %0,
@@ -3344,7 +3344,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
     <vscale x 1 x i8> %0,
@@ -3359,7 +3359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(
     <vscale x 1 x i8> %0,
@@ -3377,7 +3377,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(
     <vscale x 2 x i8> %0,
@@ -3392,7 +3392,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(
     <vscale x 2 x i8> %0,
@@ -3410,7 +3410,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(
     <vscale x 4 x i8> %0,
@@ -3425,7 +3425,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(
     <vscale x 4 x i8> %0,
@@ -3443,7 +3443,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(
     <vscale x 8 x i8> %0,
@@ -3458,7 +3458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(
     <vscale x 8 x i8> %0,
@@ -3476,7 +3476,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(
     <vscale x 16 x i8> %0,
@@ -3491,7 +3491,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(
     <vscale x 16 x i8> %0,
@@ -3509,7 +3509,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(
     <vscale x 32 x i8> %0,
@@ -3524,7 +3524,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(
     <vscale x 32 x i8> %0,
@@ -3542,7 +3542,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(
     <vscale x 64 x i8> %0,
@@ -3557,7 +3557,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(
     <vscale x 64 x i8> %0,
@@ -3575,7 +3575,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(
     <vscale x 1 x i16> %0,
@@ -3590,7 +3590,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(
     <vscale x 1 x i16> %0,
@@ -3608,7 +3608,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(
     <vscale x 2 x i16> %0,
@@ -3623,7 +3623,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(
     <vscale x 2 x i16> %0,
@@ -3641,7 +3641,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(
     <vscale x 4 x i16> %0,
@@ -3656,7 +3656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(
     <vscale x 4 x i16> %0,
@@ -3674,7 +3674,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(
     <vscale x 8 x i16> %0,
@@ -3689,7 +3689,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(
     <vscale x 8 x i16> %0,
@@ -3707,7 +3707,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(
     <vscale x 16 x i16> %0,
@@ -3722,7 +3722,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(
     <vscale x 16 x i16> %0,
@@ -3740,7 +3740,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(
     <vscale x 32 x i16> %0,
@@ -3755,7 +3755,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(
     <vscale x 32 x i16> %0,
@@ -3773,7 +3773,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(
     <vscale x 1 x i32> %0,
@@ -3788,7 +3788,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(
     <vscale x 1 x i32> %0,
@@ -3806,7 +3806,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
     <vscale x 2 x i32> %0,
@@ -3821,7 +3821,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(
     <vscale x 2 x i32> %0,
@@ -3839,7 +3839,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(
     <vscale x 4 x i32> %0,
@@ -3854,7 +3854,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(
     <vscale x 4 x i32> %0,
@@ -3872,7 +3872,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(
     <vscale x 8 x i32> %0,
@@ -3887,7 +3887,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(
     <vscale x 8 x i32> %0,
@@ -3905,7 +3905,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(
     <vscale x 16 x i32> %0,
@@ -3920,7 +3920,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(
     <vscale x 16 x i32> %0,
@@ -3938,7 +3938,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -3953,7 +3953,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -3971,7 +3971,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -3986,7 +3986,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -4004,7 +4004,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -4019,7 +4019,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -4037,7 +4037,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -4052,7 +4052,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -4070,7 +4070,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(
     <vscale x 1 x half> %0,
@@ -4085,7 +4085,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i64(
     <vscale x 1 x half> %0,
@@ -4103,7 +4103,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(
     <vscale x 2 x half> %0,
@@ -4118,7 +4118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i64(
     <vscale x 2 x half> %0,
@@ -4136,7 +4136,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(
     <vscale x 4 x half> %0,
@@ -4151,7 +4151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i64(
     <vscale x 4 x half> %0,
@@ -4169,7 +4169,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(
     <vscale x 8 x half> %0,
@@ -4184,7 +4184,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i64(
     <vscale x 8 x half> %0,
@@ -4202,7 +4202,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(
     <vscale x 16 x half> %0,
@@ -4217,7 +4217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i64(
     <vscale x 16 x half> %0,
@@ -4235,7 +4235,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(
     <vscale x 32 x half> %0,
@@ -4250,7 +4250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i64(
     <vscale x 32 x half> %0,
@@ -4268,7 +4268,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(
     <vscale x 1 x float> %0,
@@ -4283,7 +4283,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(
     <vscale x 1 x float> %0,
@@ -4301,7 +4301,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(
     <vscale x 2 x float> %0,
@@ -4316,7 +4316,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i64(
     <vscale x 2 x float> %0,
@@ -4334,7 +4334,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(
     <vscale x 4 x float> %0,
@@ -4349,7 +4349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i64(
     <vscale x 4 x float> %0,
@@ -4367,7 +4367,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(
     <vscale x 8 x float> %0,
@@ -4382,7 +4382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i64(
     <vscale x 8 x float> %0,
@@ -4400,7 +4400,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(
     <vscale x 16 x float> %0,
@@ -4415,7 +4415,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i64(
     <vscale x 16 x float> %0,
@@ -4433,7 +4433,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrgather.vi v25, v8, 9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(
     <vscale x 1 x double> %0,
@@ -4448,7 +4448,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i64(
     <vscale x 1 x double> %0,
@@ -4466,7 +4466,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrgather.vi v26, v8, 9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(
     <vscale x 2 x double> %0,
@@ -4481,7 +4481,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i64(
     <vscale x 2 x double> %0,
@@ -4499,7 +4499,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgather.vi v28, v8, 9
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(
     <vscale x 4 x double> %0,
@@ -4514,7 +4514,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i64(
     <vscale x 4 x double> %0,
@@ -4532,7 +4532,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgather.vi v16, v8, 9
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(
     <vscale x 8 x double> %0,
@@ -4547,7 +4547,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
index 7a038d0..bdd4b36 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -530,7 +530,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -553,7 +553,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -575,7 +575,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -598,7 +598,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -620,7 +620,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -643,7 +643,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -665,7 +665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -688,7 +688,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -756,7 +756,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -779,7 +779,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -802,7 +802,7 @@
 ; CHECK-NEXT:    vl2re16.v v26, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v26, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -825,7 +825,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16(
     <vscale x 1 x half> %0,
@@ -847,7 +847,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -870,7 +870,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.nxv2f16(
     <vscale x 2 x half> %0,
@@ -892,7 +892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -915,7 +915,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.nxv4f16(
     <vscale x 4 x half> %0,
@@ -937,7 +937,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -960,7 +960,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.nxv8f16(
     <vscale x 8 x half> %0,
@@ -982,7 +982,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1050,7 +1050,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1073,7 +1073,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1096,7 +1096,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1118,7 +1118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1163,7 +1163,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1186,7 +1186,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32(
     <vscale x 16 x float> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64(
     <vscale x 4 x double> %0,
@@ -1299,7 +1299,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -1322,7 +1322,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64(
     <vscale x 8 x double> %0,
@@ -1345,7 +1345,7 @@
 ; CHECK-NEXT:    vl2re16.v v26, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v26, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv8f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
index f6d8f1b..edc9986 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v10
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v12
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v16
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -530,7 +530,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -553,7 +553,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -575,7 +575,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -598,7 +598,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -620,7 +620,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -643,7 +643,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -665,7 +665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -688,7 +688,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -711,7 +711,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -734,7 +734,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -756,7 +756,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -779,7 +779,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -802,7 +802,7 @@
 ; CHECK-NEXT:    vl2re16.v v26, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v26, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -825,7 +825,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16(
     <vscale x 1 x half> %0,
@@ -847,7 +847,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -870,7 +870,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.nxv2f16(
     <vscale x 2 x half> %0,
@@ -892,7 +892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -915,7 +915,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.nxv4f16(
     <vscale x 4 x half> %0,
@@ -937,7 +937,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -960,7 +960,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.nxv8f16(
     <vscale x 8 x half> %0,
@@ -982,7 +982,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1050,7 +1050,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1073,7 +1073,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vrgatherei16.vv.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1096,7 +1096,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1118,7 +1118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1163,7 +1163,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -1186,7 +1186,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32(
     <vscale x 16 x float> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64(
     <vscale x 4 x double> %0,
@@ -1299,7 +1299,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -1322,7 +1322,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64(
     <vscale x 8 x double> %0,
@@ -1345,7 +1345,7 @@
 ; CHECK-NEXT:    vl2re16.v v26, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrgatherei16.vv v8, v16, v26, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv8f64(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
index 39eda68..7123cac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -319,7 +319,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -341,7 +341,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -363,7 +363,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -385,7 +385,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -407,7 +407,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -429,7 +429,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -451,7 +451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -473,7 +473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -517,7 +517,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -539,7 +539,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -561,7 +561,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -627,7 +627,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -649,7 +649,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -671,7 +671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -693,7 +693,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -715,7 +715,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -737,7 +737,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -759,7 +759,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -781,7 +781,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -809,7 +809,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v25, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -838,7 +838,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v25, v9, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -866,7 +866,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v26, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -895,7 +895,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v26, v10, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -923,7 +923,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v28, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -952,7 +952,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v28, v12, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -980,7 +980,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v16, v8
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1009,7 +1009,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v24, v16, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1026,7 +1026,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1058,7 +1058,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1073,7 +1073,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1090,7 +1090,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1105,7 +1105,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1122,7 +1122,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1154,7 +1154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1169,7 +1169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1186,7 +1186,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1201,7 +1201,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1218,7 +1218,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1233,7 +1233,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1250,7 +1250,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1265,7 +1265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1282,7 +1282,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1297,7 +1297,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1314,7 +1314,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1329,7 +1329,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1361,7 +1361,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1378,7 +1378,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1393,7 +1393,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1410,7 +1410,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1425,7 +1425,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1442,7 +1442,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1457,7 +1457,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1474,7 +1474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1506,7 +1506,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1521,7 +1521,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1538,7 +1538,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1553,7 +1553,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1570,7 +1570,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1585,7 +1585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1602,7 +1602,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1617,7 +1617,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1634,7 +1634,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1649,7 +1649,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1666,7 +1666,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1681,7 +1681,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1698,7 +1698,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1713,7 +1713,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
index 6b364a1..a6e986a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -319,7 +319,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -341,7 +341,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -363,7 +363,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -385,7 +385,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -407,7 +407,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -429,7 +429,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -451,7 +451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -473,7 +473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -517,7 +517,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -539,7 +539,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -561,7 +561,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -627,7 +627,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -649,7 +649,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -671,7 +671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -693,7 +693,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -715,7 +715,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -737,7 +737,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -759,7 +759,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -781,7 +781,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -803,7 +803,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -825,7 +825,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -847,7 +847,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -891,7 +891,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -913,7 +913,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -935,7 +935,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -957,7 +957,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -974,7 +974,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -989,7 +989,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1006,7 +1006,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1021,7 +1021,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1038,7 +1038,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1053,7 +1053,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1102,7 +1102,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1117,7 +1117,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1134,7 +1134,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1149,7 +1149,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1166,7 +1166,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1198,7 +1198,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1213,7 +1213,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1230,7 +1230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1245,7 +1245,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1262,7 +1262,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1309,7 +1309,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1326,7 +1326,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1341,7 +1341,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1358,7 +1358,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1373,7 +1373,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1390,7 +1390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1405,7 +1405,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1422,7 +1422,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1437,7 +1437,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1454,7 +1454,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1469,7 +1469,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1486,7 +1486,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1501,7 +1501,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1518,7 +1518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1550,7 +1550,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1565,7 +1565,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1582,7 +1582,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1597,7 +1597,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1614,7 +1614,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1629,7 +1629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1646,7 +1646,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vrsub.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1661,7 +1661,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vrsub.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
index 53a1866..cf6451b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsadd.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vsadd.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vsadd.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2013,7 +2013,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2030,7 +2030,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2045,7 +2045,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2062,7 +2062,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2077,7 +2077,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2094,7 +2094,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2109,7 +2109,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2126,7 +2126,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2141,7 +2141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2158,7 +2158,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2173,7 +2173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2190,7 +2190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2205,7 +2205,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2222,7 +2222,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2237,7 +2237,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2254,7 +2254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2269,7 +2269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2286,7 +2286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2301,7 +2301,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2318,7 +2318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2333,7 +2333,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2350,7 +2350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2365,7 +2365,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2382,7 +2382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2397,7 +2397,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2414,7 +2414,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2429,7 +2429,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2446,7 +2446,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2461,7 +2461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2478,7 +2478,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2493,7 +2493,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2510,7 +2510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2525,7 +2525,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2542,7 +2542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2557,7 +2557,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2574,7 +2574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2589,7 +2589,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2606,7 +2606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2621,7 +2621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2638,7 +2638,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2653,7 +2653,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2670,7 +2670,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2685,7 +2685,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
index 89a5b9d..fa88d29 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
index 77136dc..9fb3abb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2013,7 +2013,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2030,7 +2030,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2045,7 +2045,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2062,7 +2062,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2077,7 +2077,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2094,7 +2094,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2109,7 +2109,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2126,7 +2126,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2141,7 +2141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2158,7 +2158,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2173,7 +2173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2190,7 +2190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2205,7 +2205,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2222,7 +2222,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2237,7 +2237,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2254,7 +2254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2269,7 +2269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2286,7 +2286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2301,7 +2301,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2318,7 +2318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2333,7 +2333,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2350,7 +2350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2365,7 +2365,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2382,7 +2382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2397,7 +2397,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2414,7 +2414,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2429,7 +2429,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2446,7 +2446,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2461,7 +2461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2478,7 +2478,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2493,7 +2493,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2510,7 +2510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2525,7 +2525,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2542,7 +2542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2557,7 +2557,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2574,7 +2574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2589,7 +2589,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2606,7 +2606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2621,7 +2621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2638,7 +2638,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2653,7 +2653,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2670,7 +2670,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2685,7 +2685,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
index 05da897..89f54c3a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
index ca893a4..30860f9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -562,7 +562,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -584,7 +584,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -606,7 +606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -628,7 +628,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -650,7 +650,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -672,7 +672,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -694,7 +694,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -716,7 +716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -738,7 +738,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -760,7 +760,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -782,7 +782,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -804,7 +804,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -826,7 +826,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -848,7 +848,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -870,7 +870,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -898,7 +898,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v25, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -926,7 +926,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v26, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -954,7 +954,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v28, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -982,7 +982,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll
index 03c88c2..2f0fe02 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -56,7 +56,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -78,7 +78,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -100,7 +100,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -122,7 +122,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -144,7 +144,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -166,7 +166,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -188,7 +188,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -232,7 +232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -254,7 +254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -276,7 +276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -298,7 +298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v9, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v10, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -562,7 +562,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -584,7 +584,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -606,7 +606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -628,7 +628,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -650,7 +650,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -672,7 +672,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -694,7 +694,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -716,7 +716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -738,7 +738,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -760,7 +760,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -782,7 +782,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -804,7 +804,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -826,7 +826,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -848,7 +848,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -870,7 +870,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -892,7 +892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -914,7 +914,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -936,7 +936,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -958,7 +958,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsbc.vxm v8, v8, a0, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
index d82124e..1d3bfa1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \
 ; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vse.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -54,7 +54,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -75,7 +75,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -96,7 +96,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -117,7 +117,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -138,7 +138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -159,7 +159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -180,7 +180,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1f64(
     <vscale x 1 x double> %0,
@@ -201,7 +201,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -222,7 +222,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2f64(
     <vscale x 2 x double> %0,
@@ -243,7 +243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -264,7 +264,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4f64(
     <vscale x 4 x double> %0,
@@ -285,7 +285,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -306,7 +306,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8f64(
     <vscale x 8 x double> %0,
@@ -327,7 +327,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -348,7 +348,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -369,7 +369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -411,7 +411,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -432,7 +432,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -453,7 +453,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -516,7 +516,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -537,7 +537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -558,7 +558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1f32(
     <vscale x 1 x float> %0,
@@ -579,7 +579,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -600,7 +600,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2f32(
     <vscale x 2 x float> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -642,7 +642,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4f32(
     <vscale x 4 x float> %0,
@@ -663,7 +663,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -684,7 +684,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8f32(
     <vscale x 8 x float> %0,
@@ -705,7 +705,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -726,7 +726,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16f32(
     <vscale x 16 x float> %0,
@@ -747,7 +747,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -768,7 +768,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -789,7 +789,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -810,7 +810,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -831,7 +831,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -852,7 +852,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -873,7 +873,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -915,7 +915,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -936,7 +936,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -999,7 +999,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1062,7 +1062,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1083,7 +1083,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1104,7 +1104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1125,7 +1125,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1146,7 +1146,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1167,7 +1167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1188,7 +1188,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1209,7 +1209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1230,7 +1230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1251,7 +1251,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1293,7 +1293,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1314,7 +1314,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1356,7 +1356,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1377,7 +1377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1398,7 +1398,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1419,7 +1419,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1440,7 +1440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1482,7 +1482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1503,7 +1503,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1524,7 +1524,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1545,7 +1545,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv64i8(
     <vscale x 64 x i8> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll
index 048c894..801b2d1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \
 ; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vse.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -54,7 +54,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -75,7 +75,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -96,7 +96,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -117,7 +117,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -138,7 +138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -159,7 +159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -180,7 +180,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1f64(
     <vscale x 1 x double> %0,
@@ -201,7 +201,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -222,7 +222,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2f64(
     <vscale x 2 x double> %0,
@@ -243,7 +243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -264,7 +264,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4f64(
     <vscale x 4 x double> %0,
@@ -285,7 +285,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -306,7 +306,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8f64(
     <vscale x 8 x double> %0,
@@ -327,7 +327,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -348,7 +348,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -369,7 +369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -390,7 +390,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -411,7 +411,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -432,7 +432,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -453,7 +453,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -516,7 +516,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -537,7 +537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -558,7 +558,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1f32(
     <vscale x 1 x float> %0,
@@ -579,7 +579,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -600,7 +600,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2f32(
     <vscale x 2 x float> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -642,7 +642,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4f32(
     <vscale x 4 x float> %0,
@@ -663,7 +663,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -684,7 +684,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8f32(
     <vscale x 8 x float> %0,
@@ -705,7 +705,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -726,7 +726,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16f32(
     <vscale x 16 x float> %0,
@@ -747,7 +747,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -768,7 +768,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -789,7 +789,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -810,7 +810,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -831,7 +831,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -852,7 +852,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -873,7 +873,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -915,7 +915,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -936,7 +936,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -999,7 +999,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1020,7 +1020,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1062,7 +1062,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1083,7 +1083,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1104,7 +1104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1125,7 +1125,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1146,7 +1146,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1167,7 +1167,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1188,7 +1188,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1209,7 +1209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1230,7 +1230,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1251,7 +1251,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1293,7 +1293,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1314,7 +1314,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1356,7 +1356,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1377,7 +1377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1398,7 +1398,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1419,7 +1419,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1440,7 +1440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1482,7 +1482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1503,7 +1503,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1524,7 +1524,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1545,7 +1545,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse.mask.nxv64i8(
     <vscale x 64 x i8> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll
index d94125d..e07a48d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 
 declare void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>*, i32);
 
@@ -9,7 +9,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, i32 %2)
   ret void
@@ -22,7 +22,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, i32 %2)
   ret void
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, i32 %2)
   ret void
@@ -48,7 +48,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, i32 %2)
   ret void
@@ -61,7 +61,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, i32 %2)
   ret void
@@ -74,7 +74,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, i32 %2)
   ret void
@@ -87,7 +87,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i32 %2)
   ret void
diff --git a/llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll
index 48d4585..a1dd8a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 
 declare void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>*, i64);
 
@@ -9,7 +9,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, i64 %2)
   ret void
@@ -22,7 +22,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, i64 %2)
   ret void
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, i64 %2)
   ret void
@@ -48,7 +48,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, i64 %2)
   ret void
@@ -61,7 +61,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, i64 %2)
   ret void
@@ -74,7 +74,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, i64 %2)
   ret void
@@ -87,7 +87,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vse1.nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i64 %2)
   ret void
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll
index aead67d..77b681e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
   <vscale x 1 x i8>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %1,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %1,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %1,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %1,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %1,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %1,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %1,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %1,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %1,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %1,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %1,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %1,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %1,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %1,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %1,
@@ -626,7 +626,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -646,7 +646,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %1,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %1,
@@ -708,7 +708,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -728,7 +728,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %1,
@@ -749,7 +749,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -769,7 +769,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %1,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -810,7 +810,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %1,
@@ -831,7 +831,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -851,7 +851,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %1,
@@ -872,7 +872,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -892,7 +892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %1,
@@ -913,7 +913,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -933,7 +933,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %1,
@@ -954,7 +954,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -974,7 +974,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %1,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll
index 6014e27..96b9fd8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
   <vscale x 1 x i8>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %1,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %1,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %1,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf8 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf8 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %1,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %1,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %1,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %1,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %1,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %1,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %1,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %1,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %1,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %1,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %1,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %1,
@@ -626,7 +626,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -646,7 +646,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %1,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %1,
@@ -708,7 +708,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -728,7 +728,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %1,
@@ -749,7 +749,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -769,7 +769,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %1,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -810,7 +810,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %1,
@@ -831,7 +831,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -851,7 +851,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %1,
@@ -872,7 +872,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -892,7 +892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %1,
@@ -913,7 +913,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -933,7 +933,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %1,
@@ -954,7 +954,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -974,7 +974,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %1,
@@ -995,7 +995,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1015,7 +1015,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %1,
@@ -1036,7 +1036,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1056,7 +1056,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %1,
@@ -1077,7 +1077,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1097,7 +1097,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %1,
@@ -1118,7 +1118,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1138,7 +1138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %1,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
index b6cda06..da86323 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -319,7 +319,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -341,7 +341,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -363,7 +363,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -385,7 +385,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -407,7 +407,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -429,7 +429,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -451,7 +451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -473,7 +473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -517,7 +517,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -539,7 +539,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -561,7 +561,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -627,7 +627,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -649,7 +649,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -671,7 +671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -693,7 +693,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -715,7 +715,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -737,7 +737,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -759,7 +759,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -781,7 +781,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -805,7 +805,7 @@
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v25, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v25, a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -831,7 +831,7 @@
 ; CHECK-NEXT:    vslide1down.vx v25, v25, a1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v25, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -855,7 +855,7 @@
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v26, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v26, a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -881,7 +881,7 @@
 ; CHECK-NEXT:    vslide1down.vx v26, v26, a1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v26, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -905,7 +905,7 @@
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v28, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v28, a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -931,7 +931,7 @@
 ; CHECK-NEXT:    vslide1down.vx v28, v28, a1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v28, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -955,7 +955,7 @@
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -981,7 +981,7 @@
 ; CHECK-NEXT:    vslide1down.vx v16, v16, a1
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
index dccc604..1f11be1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -297,7 +297,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -319,7 +319,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -341,7 +341,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -363,7 +363,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -385,7 +385,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -407,7 +407,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -429,7 +429,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -451,7 +451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -473,7 +473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -517,7 +517,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -539,7 +539,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -561,7 +561,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -583,7 +583,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -627,7 +627,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -649,7 +649,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -671,7 +671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -693,7 +693,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -715,7 +715,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -737,7 +737,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -759,7 +759,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -781,7 +781,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -803,7 +803,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -825,7 +825,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -847,7 +847,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -869,7 +869,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -891,7 +891,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -913,7 +913,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -935,7 +935,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -957,7 +957,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
index eb907ea..5a1f0a4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -823,7 +823,7 @@
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a1
 ; CHECK-NEXT:    vslide1up.vx v8, v25, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -849,7 +849,7 @@
 ; CHECK-NEXT:    vslide1up.vx v26, v25, a0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v26, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -873,7 +873,7 @@
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a1
 ; CHECK-NEXT:    vslide1up.vx v8, v26, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -899,7 +899,7 @@
 ; CHECK-NEXT:    vslide1up.vx v28, v26, a0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v28, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -923,7 +923,7 @@
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a1
 ; CHECK-NEXT:    vslide1up.vx v8, v28, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -949,7 +949,7 @@
 ; CHECK-NEXT:    vslide1up.vx v12, v28, a0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -973,7 +973,7 @@
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a1
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -999,7 +999,7 @@
 ; CHECK-NEXT:    vslide1up.vx v16, v24, a0
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
index e69e4ad..830a753 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
index a8aaee0..a389a20 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -114,7 +114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -226,7 +226,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -249,7 +249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -272,7 +272,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -289,7 +289,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -328,7 +328,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -351,7 +351,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -368,7 +368,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -384,7 +384,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -407,7 +407,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -447,7 +447,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -463,7 +463,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -486,7 +486,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -509,7 +509,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -526,7 +526,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -542,7 +542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -565,7 +565,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -588,7 +588,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -644,7 +644,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -667,7 +667,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -684,7 +684,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -700,7 +700,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -723,7 +723,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -746,7 +746,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -763,7 +763,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -779,7 +779,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -802,7 +802,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -825,7 +825,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -842,7 +842,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -858,7 +858,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -881,7 +881,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -904,7 +904,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -921,7 +921,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -937,7 +937,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -960,7 +960,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1000,7 +1000,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1016,7 +1016,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1039,7 +1039,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1062,7 +1062,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1079,7 +1079,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1095,7 +1095,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1118,7 +1118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1174,7 +1174,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1197,7 +1197,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1220,7 +1220,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1237,7 +1237,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1253,7 +1253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1299,7 +1299,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1316,7 +1316,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1332,7 +1332,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1355,7 +1355,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1378,7 +1378,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1411,7 +1411,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1434,7 +1434,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1457,7 +1457,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1474,7 +1474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1490,7 +1490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1513,7 +1513,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1553,7 +1553,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1569,7 +1569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1592,7 +1592,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1615,7 +1615,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1632,7 +1632,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1648,7 +1648,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1671,7 +1671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1694,7 +1694,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1711,7 +1711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1727,7 +1727,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1750,7 +1750,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1773,7 +1773,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1790,7 +1790,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1829,7 +1829,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1852,7 +1852,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1869,7 +1869,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1908,7 +1908,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1931,7 +1931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1948,7 +1948,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1964,7 +1964,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1987,7 +1987,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2027,7 +2027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2043,7 +2043,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2066,7 +2066,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2122,7 +2122,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2145,7 +2145,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2168,7 +2168,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2201,7 +2201,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2224,7 +2224,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2247,7 +2247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2264,7 +2264,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2280,7 +2280,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2303,7 +2303,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2326,7 +2326,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2343,7 +2343,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2359,7 +2359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
index 3588f07..9084196 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -114,7 +114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -226,7 +226,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -249,7 +249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -272,7 +272,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -289,7 +289,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -328,7 +328,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -351,7 +351,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -368,7 +368,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -384,7 +384,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -407,7 +407,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -447,7 +447,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -463,7 +463,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -486,7 +486,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -509,7 +509,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -526,7 +526,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -542,7 +542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -565,7 +565,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -588,7 +588,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -644,7 +644,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -667,7 +667,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -684,7 +684,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -700,7 +700,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -723,7 +723,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -746,7 +746,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -763,7 +763,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -779,7 +779,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -802,7 +802,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -825,7 +825,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -842,7 +842,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -858,7 +858,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -881,7 +881,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -904,7 +904,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -921,7 +921,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -937,7 +937,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -960,7 +960,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1000,7 +1000,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1016,7 +1016,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1039,7 +1039,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1062,7 +1062,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1079,7 +1079,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1095,7 +1095,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1118,7 +1118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1174,7 +1174,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1197,7 +1197,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1220,7 +1220,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1237,7 +1237,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1253,7 +1253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1299,7 +1299,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1316,7 +1316,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1332,7 +1332,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1355,7 +1355,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1378,7 +1378,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1411,7 +1411,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1434,7 +1434,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1457,7 +1457,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1474,7 +1474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1490,7 +1490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1513,7 +1513,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1553,7 +1553,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1569,7 +1569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1592,7 +1592,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1615,7 +1615,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1632,7 +1632,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1648,7 +1648,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1671,7 +1671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1694,7 +1694,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1711,7 +1711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1727,7 +1727,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1750,7 +1750,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1773,7 +1773,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1790,7 +1790,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1829,7 +1829,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1852,7 +1852,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1869,7 +1869,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1908,7 +1908,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1931,7 +1931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1948,7 +1948,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1964,7 +1964,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1987,7 +1987,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2027,7 +2027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2043,7 +2043,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2066,7 +2066,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2122,7 +2122,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2145,7 +2145,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2168,7 +2168,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2201,7 +2201,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2224,7 +2224,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2247,7 +2247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2264,7 +2264,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2280,7 +2280,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2303,7 +2303,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2326,7 +2326,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2343,7 +2343,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2359,7 +2359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
index 5188670..7d9eb8e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -114,7 +114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -226,7 +226,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -249,7 +249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -272,7 +272,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -289,7 +289,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -328,7 +328,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -351,7 +351,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -368,7 +368,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -384,7 +384,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -407,7 +407,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -447,7 +447,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -463,7 +463,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -486,7 +486,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -509,7 +509,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -526,7 +526,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -542,7 +542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -565,7 +565,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -588,7 +588,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -644,7 +644,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -667,7 +667,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -684,7 +684,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -700,7 +700,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -723,7 +723,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -746,7 +746,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -763,7 +763,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -779,7 +779,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -802,7 +802,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -825,7 +825,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -842,7 +842,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -858,7 +858,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -881,7 +881,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -904,7 +904,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -921,7 +921,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -937,7 +937,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -960,7 +960,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1000,7 +1000,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1016,7 +1016,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1039,7 +1039,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1062,7 +1062,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1079,7 +1079,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1095,7 +1095,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1118,7 +1118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1174,7 +1174,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1197,7 +1197,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1220,7 +1220,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1237,7 +1237,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1253,7 +1253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1299,7 +1299,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1316,7 +1316,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1332,7 +1332,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1355,7 +1355,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1378,7 +1378,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1411,7 +1411,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1434,7 +1434,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1457,7 +1457,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1474,7 +1474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1490,7 +1490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1513,7 +1513,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1553,7 +1553,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1569,7 +1569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1592,7 +1592,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1615,7 +1615,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1632,7 +1632,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1648,7 +1648,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1671,7 +1671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1694,7 +1694,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1711,7 +1711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1727,7 +1727,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1750,7 +1750,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1773,7 +1773,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1790,7 +1790,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1829,7 +1829,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1852,7 +1852,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1869,7 +1869,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1908,7 +1908,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1931,7 +1931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1948,7 +1948,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1964,7 +1964,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1987,7 +1987,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2027,7 +2027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2043,7 +2043,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2066,7 +2066,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2122,7 +2122,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2145,7 +2145,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2168,7 +2168,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2201,7 +2201,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2224,7 +2224,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2247,7 +2247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2264,7 +2264,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2280,7 +2280,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2303,7 +2303,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2326,7 +2326,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2343,7 +2343,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2359,7 +2359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
index 07e2286..bb0c37d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -52,7 +52,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -68,7 +68,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -91,7 +91,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -114,7 +114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -131,7 +131,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -147,7 +147,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -170,7 +170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -193,7 +193,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -210,7 +210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -226,7 +226,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -249,7 +249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -272,7 +272,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -289,7 +289,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -328,7 +328,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -351,7 +351,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -368,7 +368,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -384,7 +384,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -407,7 +407,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -447,7 +447,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -463,7 +463,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -486,7 +486,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -509,7 +509,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -526,7 +526,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -542,7 +542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -565,7 +565,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -588,7 +588,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -644,7 +644,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -667,7 +667,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -684,7 +684,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -700,7 +700,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -723,7 +723,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -746,7 +746,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -763,7 +763,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -779,7 +779,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -802,7 +802,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -825,7 +825,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -842,7 +842,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -858,7 +858,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -881,7 +881,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -904,7 +904,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -921,7 +921,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -937,7 +937,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -960,7 +960,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1000,7 +1000,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1016,7 +1016,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1039,7 +1039,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1062,7 +1062,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1079,7 +1079,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1095,7 +1095,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1118,7 +1118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1141,7 +1141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1174,7 +1174,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1197,7 +1197,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1220,7 +1220,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1237,7 +1237,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1253,7 +1253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1276,7 +1276,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1299,7 +1299,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1316,7 +1316,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1332,7 +1332,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1355,7 +1355,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1378,7 +1378,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1395,7 +1395,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1411,7 +1411,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1434,7 +1434,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1457,7 +1457,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1474,7 +1474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1490,7 +1490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1513,7 +1513,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1536,7 +1536,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1553,7 +1553,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1569,7 +1569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1592,7 +1592,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1615,7 +1615,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1632,7 +1632,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1648,7 +1648,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1671,7 +1671,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1694,7 +1694,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1711,7 +1711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1727,7 +1727,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1750,7 +1750,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1773,7 +1773,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1790,7 +1790,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1829,7 +1829,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1852,7 +1852,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1869,7 +1869,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -1908,7 +1908,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1931,7 +1931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1948,7 +1948,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1964,7 +1964,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -1987,7 +1987,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2027,7 +2027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2043,7 +2043,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -2066,7 +2066,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2122,7 +2122,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -2145,7 +2145,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2168,7 +2168,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2201,7 +2201,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -2224,7 +2224,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2247,7 +2247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2264,7 +2264,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2280,7 +2280,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -2303,7 +2303,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2326,7 +2326,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2343,7 +2343,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64(
     <vscale x 4 x double> %0,
@@ -2359,7 +2359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
     <vscale x 4 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
index a85288a..6a803ca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
index 9e6c625..5826a78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
index 93d6a72..e0f63a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsmul.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vsmul.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vsmul.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
index 083b4d5..c131a62 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
index 142d740..44c020f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1392,7 +1392,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1484,7 +1484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1530,7 +1530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1553,7 +1553,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1576,7 +1576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1622,7 +1622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1645,7 +1645,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1668,7 +1668,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1691,7 +1691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1714,7 +1714,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1737,7 +1737,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1760,7 +1760,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1783,7 +1783,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1829,7 +1829,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1852,7 +1852,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1875,7 +1875,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1898,7 +1898,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1921,7 +1921,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1944,7 +1944,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1967,7 +1967,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1990,7 +1990,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2013,7 +2013,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2036,7 +2036,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2059,7 +2059,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2082,7 +2082,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2105,7 +2105,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2128,7 +2128,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2151,7 +2151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2174,7 +2174,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2197,7 +2197,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2220,7 +2220,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2243,7 +2243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2289,7 +2289,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2312,7 +2312,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2335,7 +2335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2358,7 +2358,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2381,7 +2381,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2404,7 +2404,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2427,7 +2427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2450,7 +2450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2496,7 +2496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2519,7 +2519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2542,7 +2542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2565,7 +2565,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2588,7 +2588,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2611,7 +2611,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2634,7 +2634,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2657,7 +2657,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2680,7 +2680,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2703,7 +2703,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2726,7 +2726,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2749,7 +2749,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2772,7 +2772,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2795,7 +2795,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2818,7 +2818,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2841,7 +2841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2864,7 +2864,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2887,7 +2887,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2910,7 +2910,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2933,7 +2933,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2956,7 +2956,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2979,7 +2979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -3002,7 +3002,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3025,7 +3025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3048,7 +3048,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3071,7 +3071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3094,7 +3094,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3117,7 +3117,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3140,7 +3140,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3163,7 +3163,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3186,7 +3186,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3209,7 +3209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3232,7 +3232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3255,7 +3255,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3278,7 +3278,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3301,7 +3301,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3324,7 +3324,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3347,7 +3347,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3370,7 +3370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3393,7 +3393,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3416,7 +3416,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3439,7 +3439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3462,7 +3462,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3485,7 +3485,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3508,7 +3508,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3531,7 +3531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3554,7 +3554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3577,7 +3577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3600,7 +3600,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3623,7 +3623,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3646,7 +3646,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3669,7 +3669,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3692,7 +3692,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3715,7 +3715,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3738,7 +3738,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3761,7 +3761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3784,7 +3784,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3807,7 +3807,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3830,7 +3830,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3853,7 +3853,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3876,7 +3876,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3899,7 +3899,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3922,7 +3922,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3945,7 +3945,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3968,7 +3968,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3991,7 +3991,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -4014,7 +4014,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4037,7 +4037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4060,7 +4060,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4083,7 +4083,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4106,7 +4106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4129,7 +4129,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4152,7 +4152,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4175,7 +4175,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4198,7 +4198,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4221,7 +4221,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4244,7 +4244,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4267,7 +4267,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4290,7 +4290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4313,7 +4313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4336,7 +4336,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4359,7 +4359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4382,7 +4382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4405,7 +4405,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4428,7 +4428,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4451,7 +4451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4474,7 +4474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4497,7 +4497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4520,7 +4520,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4543,7 +4543,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4566,7 +4566,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4589,7 +4589,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4612,7 +4612,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4635,7 +4635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4658,7 +4658,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4681,7 +4681,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4704,7 +4704,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4727,7 +4727,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4750,7 +4750,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4773,7 +4773,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4796,7 +4796,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4819,7 +4819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4842,7 +4842,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4865,7 +4865,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4888,7 +4888,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4911,7 +4911,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4934,7 +4934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4957,7 +4957,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4980,7 +4980,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5003,7 +5003,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5026,7 +5026,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5049,7 +5049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5072,7 +5072,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5095,7 +5095,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5118,7 +5118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5141,7 +5141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5164,7 +5164,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5187,7 +5187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5210,7 +5210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5233,7 +5233,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5256,7 +5256,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5279,7 +5279,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5302,7 +5302,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5325,7 +5325,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5348,7 +5348,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5371,7 +5371,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5394,7 +5394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5417,7 +5417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5440,7 +5440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5463,7 +5463,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5486,7 +5486,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5509,7 +5509,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5532,7 +5532,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5555,7 +5555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5578,7 +5578,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5601,7 +5601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5624,7 +5624,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5647,7 +5647,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5670,7 +5670,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5693,7 +5693,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5716,7 +5716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5739,7 +5739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5762,7 +5762,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5785,7 +5785,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5808,7 +5808,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5831,7 +5831,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5854,7 +5854,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5877,7 +5877,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5900,7 +5900,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5923,7 +5923,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5946,7 +5946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5969,7 +5969,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5992,7 +5992,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6015,7 +6015,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6038,7 +6038,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6061,7 +6061,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6084,7 +6084,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6107,7 +6107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6130,7 +6130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
@@ -6153,7 +6153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
index d8cc06f..4a13cb3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1392,7 +1392,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1484,7 +1484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1530,7 +1530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1553,7 +1553,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1576,7 +1576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1622,7 +1622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1645,7 +1645,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1668,7 +1668,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1691,7 +1691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1714,7 +1714,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1737,7 +1737,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1760,7 +1760,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1783,7 +1783,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1829,7 +1829,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1852,7 +1852,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1875,7 +1875,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1898,7 +1898,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1921,7 +1921,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1944,7 +1944,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1967,7 +1967,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1990,7 +1990,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2013,7 +2013,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2036,7 +2036,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2059,7 +2059,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2082,7 +2082,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2105,7 +2105,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2128,7 +2128,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2151,7 +2151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2174,7 +2174,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2197,7 +2197,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2220,7 +2220,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2243,7 +2243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2289,7 +2289,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2312,7 +2312,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2335,7 +2335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2358,7 +2358,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2381,7 +2381,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2404,7 +2404,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2427,7 +2427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2450,7 +2450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2496,7 +2496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2519,7 +2519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2542,7 +2542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2565,7 +2565,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2588,7 +2588,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2611,7 +2611,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2634,7 +2634,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2657,7 +2657,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2680,7 +2680,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2703,7 +2703,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2726,7 +2726,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2749,7 +2749,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2772,7 +2772,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2795,7 +2795,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2818,7 +2818,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2841,7 +2841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2864,7 +2864,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2887,7 +2887,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2910,7 +2910,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2933,7 +2933,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2956,7 +2956,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2979,7 +2979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -3002,7 +3002,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3025,7 +3025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3048,7 +3048,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3071,7 +3071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3094,7 +3094,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3117,7 +3117,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3140,7 +3140,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3163,7 +3163,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3186,7 +3186,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3209,7 +3209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3232,7 +3232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3255,7 +3255,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3278,7 +3278,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3301,7 +3301,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3324,7 +3324,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3347,7 +3347,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3370,7 +3370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3393,7 +3393,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3416,7 +3416,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3439,7 +3439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3462,7 +3462,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3485,7 +3485,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3508,7 +3508,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3531,7 +3531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3554,7 +3554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3577,7 +3577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3600,7 +3600,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3623,7 +3623,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3646,7 +3646,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3669,7 +3669,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3692,7 +3692,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3715,7 +3715,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3738,7 +3738,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3761,7 +3761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3784,7 +3784,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3807,7 +3807,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3830,7 +3830,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3853,7 +3853,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3876,7 +3876,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3899,7 +3899,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3922,7 +3922,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3945,7 +3945,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3968,7 +3968,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3991,7 +3991,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -4014,7 +4014,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4037,7 +4037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4060,7 +4060,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4083,7 +4083,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4106,7 +4106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4129,7 +4129,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4152,7 +4152,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4175,7 +4175,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4198,7 +4198,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4221,7 +4221,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4244,7 +4244,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4267,7 +4267,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4290,7 +4290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4313,7 +4313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4336,7 +4336,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4359,7 +4359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4382,7 +4382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4405,7 +4405,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4428,7 +4428,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4451,7 +4451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4474,7 +4474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4497,7 +4497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4520,7 +4520,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4543,7 +4543,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4566,7 +4566,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4589,7 +4589,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4612,7 +4612,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4635,7 +4635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4658,7 +4658,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4681,7 +4681,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4704,7 +4704,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4727,7 +4727,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4750,7 +4750,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4773,7 +4773,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4796,7 +4796,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4819,7 +4819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4842,7 +4842,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4865,7 +4865,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4888,7 +4888,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4911,7 +4911,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4934,7 +4934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4957,7 +4957,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4980,7 +4980,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5003,7 +5003,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5026,7 +5026,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5049,7 +5049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5072,7 +5072,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5095,7 +5095,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5118,7 +5118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5141,7 +5141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5164,7 +5164,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5187,7 +5187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5210,7 +5210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5233,7 +5233,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5256,7 +5256,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5279,7 +5279,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5302,7 +5302,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5325,7 +5325,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5348,7 +5348,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5371,7 +5371,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5394,7 +5394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5417,7 +5417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5440,7 +5440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5463,7 +5463,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5486,7 +5486,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5509,7 +5509,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5532,7 +5532,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5555,7 +5555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5578,7 +5578,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5601,7 +5601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5624,7 +5624,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5647,7 +5647,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5670,7 +5670,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5693,7 +5693,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5716,7 +5716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5739,7 +5739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5762,7 +5762,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5785,7 +5785,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5808,7 +5808,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5831,7 +5831,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5854,7 +5854,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5877,7 +5877,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5900,7 +5900,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5923,7 +5923,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5946,7 +5946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5969,7 +5969,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5992,7 +5992,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6015,7 +6015,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6038,7 +6038,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6061,7 +6061,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6084,7 +6084,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6107,7 +6107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6130,7 +6130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
@@ -6153,7 +6153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
index 3831cf7..7d11891 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
index 3e07867..38a7819 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
index 044a729..f7d9310 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
index e32a30a..2d5113d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll
index 69c4872..ca2b8f6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vsse.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1f64(
     <vscale x 1 x double> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2f64(
     <vscale x 2 x double> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4f64(
     <vscale x 4 x double> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8f64(
     <vscale x 8 x double> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1f32(
     <vscale x 1 x float> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2f32(
     <vscale x 2 x float> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4f32(
     <vscale x 4 x float> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8f32(
     <vscale x 8 x float> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16f32(
     <vscale x 16 x float> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1392,7 +1392,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1484,7 +1484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1530,7 +1530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1553,7 +1553,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1576,7 +1576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1622,7 +1622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1645,7 +1645,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1668,7 +1668,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1691,7 +1691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv64i8(
     <vscale x 64 x i8> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll
index c17030d..5461856 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vsse.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1f64(
     <vscale x 1 x double> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1f64(
     <vscale x 1 x double> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2f64(
     <vscale x 2 x double> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2f64(
     <vscale x 2 x double> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4f64(
     <vscale x 4 x double> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4f64(
     <vscale x 4 x double> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8f64(
     <vscale x 8 x double> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8f64(
     <vscale x 8 x double> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1f32(
     <vscale x 1 x float> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1f32(
     <vscale x 1 x float> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2f32(
     <vscale x 2 x float> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2f32(
     <vscale x 2 x float> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4f32(
     <vscale x 4 x float> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4f32(
     <vscale x 4 x float> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8f32(
     <vscale x 8 x float> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8f32(
     <vscale x 8 x float> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16f32(
     <vscale x 16 x float> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e32,m8,ta,mu
 ; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16f32(
     <vscale x 16 x float> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1f16(
     <vscale x 1 x half> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2f16(
     <vscale x 2 x half> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4f16(
     <vscale x 4 x half> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8f16(
     <vscale x 8 x half> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16f16(
     <vscale x 16 x half> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e16,m8,ta,mu
 ; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv32f16(
     <vscale x 32 x half> %0,
@@ -1392,7 +1392,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1484,7 +1484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1530,7 +1530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1553,7 +1553,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1576,7 +1576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1622,7 +1622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1645,7 +1645,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1668,7 +1668,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1691,7 +1691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a2, a2, e8,m8,ta,mu
 ; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsse.mask.nxv64i8(
     <vscale x 64 x i8> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
index 945b72f..bc769ae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -960,7 +960,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -982,7 +982,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1004,7 +1004,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1026,7 +1026,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1048,7 +1048,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1136,7 +1136,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1180,7 +1180,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1202,7 +1202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1224,7 +1224,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1246,7 +1246,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1268,7 +1268,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1290,7 +1290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1312,7 +1312,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1334,7 +1334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1356,7 +1356,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1378,7 +1378,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1400,7 +1400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1422,7 +1422,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1444,7 +1444,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1466,7 +1466,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1488,7 +1488,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1510,7 +1510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1532,7 +1532,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1554,7 +1554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1576,7 +1576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1598,7 +1598,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1620,7 +1620,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1642,7 +1642,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1664,7 +1664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1686,7 +1686,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1708,7 +1708,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1730,7 +1730,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1752,7 +1752,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1769,7 +1769,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1784,7 +1784,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1801,7 +1801,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1816,7 +1816,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1833,7 +1833,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1848,7 +1848,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1865,7 +1865,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1880,7 +1880,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1897,7 +1897,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1912,7 +1912,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1944,7 +1944,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1976,7 +1976,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2008,7 +2008,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2040,7 +2040,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2072,7 +2072,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2104,7 +2104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2136,7 +2136,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2168,7 +2168,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2200,7 +2200,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2232,7 +2232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2264,7 +2264,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2328,7 +2328,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
index a7ac6f1..45e7967 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssra.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vssra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
index 67b0e77..fa80883 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -960,7 +960,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -982,7 +982,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1004,7 +1004,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1026,7 +1026,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1048,7 +1048,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1136,7 +1136,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1158,7 +1158,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1180,7 +1180,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1202,7 +1202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1224,7 +1224,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1246,7 +1246,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1268,7 +1268,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1290,7 +1290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1312,7 +1312,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1334,7 +1334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1356,7 +1356,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1378,7 +1378,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1400,7 +1400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1422,7 +1422,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1444,7 +1444,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1466,7 +1466,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1488,7 +1488,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1510,7 +1510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1532,7 +1532,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1554,7 +1554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1576,7 +1576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1598,7 +1598,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1620,7 +1620,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1642,7 +1642,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1664,7 +1664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1686,7 +1686,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1708,7 +1708,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1730,7 +1730,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1752,7 +1752,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1769,7 +1769,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1784,7 +1784,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1801,7 +1801,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1816,7 +1816,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1833,7 +1833,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1848,7 +1848,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1865,7 +1865,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1880,7 +1880,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1897,7 +1897,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1912,7 +1912,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1944,7 +1944,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1976,7 +1976,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2008,7 +2008,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2040,7 +2040,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2072,7 +2072,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2104,7 +2104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2136,7 +2136,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2168,7 +2168,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2200,7 +2200,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2232,7 +2232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2264,7 +2264,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2296,7 +2296,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2328,7 +2328,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
index d630580..6c66ef5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssrl.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vssrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
index 8b8df04..69d60b3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vssub.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vssub.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vssub.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
index b588223..70cc4ca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
index 4a9d28b..7f08244 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vssubu.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vssubu.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vssubu.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
index 05e4dc9..ce3a09b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
index 659d93c..01ec914 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2013,7 +2013,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2030,7 +2030,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2045,7 +2045,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2062,7 +2062,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2077,7 +2077,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2094,7 +2094,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2109,7 +2109,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2126,7 +2126,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2141,7 +2141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2158,7 +2158,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2173,7 +2173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2190,7 +2190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2205,7 +2205,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2222,7 +2222,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2237,7 +2237,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2254,7 +2254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2269,7 +2269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2286,7 +2286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2301,7 +2301,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2318,7 +2318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2333,7 +2333,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2350,7 +2350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2365,7 +2365,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2382,7 +2382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2397,7 +2397,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2414,7 +2414,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2429,7 +2429,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2446,7 +2446,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2461,7 +2461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2478,7 +2478,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2493,7 +2493,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2510,7 +2510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2525,7 +2525,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2542,7 +2542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2557,7 +2557,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2574,7 +2574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2589,7 +2589,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2606,7 +2606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2621,7 +2621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2638,7 +2638,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2653,7 +2653,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2670,7 +2670,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2685,7 +2685,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
index d07fcc6..ada9236 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsub.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
index 32de785..386175a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1392,7 +1392,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1484,7 +1484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1530,7 +1530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1553,7 +1553,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1576,7 +1576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1622,7 +1622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1645,7 +1645,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1668,7 +1668,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1691,7 +1691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1714,7 +1714,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1737,7 +1737,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1760,7 +1760,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1783,7 +1783,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1829,7 +1829,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1852,7 +1852,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1875,7 +1875,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1898,7 +1898,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1921,7 +1921,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1944,7 +1944,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1967,7 +1967,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1990,7 +1990,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2013,7 +2013,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2036,7 +2036,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2059,7 +2059,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2082,7 +2082,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2105,7 +2105,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2128,7 +2128,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2151,7 +2151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2174,7 +2174,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2197,7 +2197,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2220,7 +2220,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2243,7 +2243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2289,7 +2289,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2312,7 +2312,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2335,7 +2335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2358,7 +2358,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2381,7 +2381,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2404,7 +2404,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2427,7 +2427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2450,7 +2450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2496,7 +2496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2519,7 +2519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2542,7 +2542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2565,7 +2565,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2588,7 +2588,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2611,7 +2611,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2634,7 +2634,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2657,7 +2657,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2680,7 +2680,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2703,7 +2703,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2726,7 +2726,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2749,7 +2749,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2772,7 +2772,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2795,7 +2795,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2818,7 +2818,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2841,7 +2841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2864,7 +2864,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2887,7 +2887,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2910,7 +2910,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2933,7 +2933,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2956,7 +2956,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2979,7 +2979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -3002,7 +3002,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3025,7 +3025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3048,7 +3048,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3071,7 +3071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3094,7 +3094,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3117,7 +3117,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3140,7 +3140,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3163,7 +3163,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3186,7 +3186,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3209,7 +3209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3232,7 +3232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3255,7 +3255,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3278,7 +3278,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3301,7 +3301,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3324,7 +3324,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3347,7 +3347,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3370,7 +3370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3393,7 +3393,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3416,7 +3416,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3439,7 +3439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3462,7 +3462,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3485,7 +3485,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3508,7 +3508,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3531,7 +3531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3554,7 +3554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3577,7 +3577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3600,7 +3600,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3623,7 +3623,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3646,7 +3646,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3669,7 +3669,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3692,7 +3692,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3715,7 +3715,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3738,7 +3738,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3761,7 +3761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3784,7 +3784,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3807,7 +3807,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3830,7 +3830,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3853,7 +3853,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3876,7 +3876,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3899,7 +3899,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3922,7 +3922,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3945,7 +3945,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3968,7 +3968,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3991,7 +3991,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -4014,7 +4014,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4037,7 +4037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4060,7 +4060,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4083,7 +4083,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4106,7 +4106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4129,7 +4129,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4152,7 +4152,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4175,7 +4175,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4198,7 +4198,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4221,7 +4221,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4244,7 +4244,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4267,7 +4267,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4290,7 +4290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4313,7 +4313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4336,7 +4336,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4359,7 +4359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4382,7 +4382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4405,7 +4405,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4428,7 +4428,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4451,7 +4451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4474,7 +4474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4497,7 +4497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4520,7 +4520,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4543,7 +4543,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4566,7 +4566,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4589,7 +4589,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4612,7 +4612,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4635,7 +4635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4658,7 +4658,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4681,7 +4681,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4704,7 +4704,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4727,7 +4727,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4750,7 +4750,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4773,7 +4773,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4796,7 +4796,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4819,7 +4819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4842,7 +4842,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4865,7 +4865,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4888,7 +4888,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4911,7 +4911,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4934,7 +4934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4957,7 +4957,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4980,7 +4980,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5003,7 +5003,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5026,7 +5026,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5049,7 +5049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5072,7 +5072,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5095,7 +5095,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5118,7 +5118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5141,7 +5141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5164,7 +5164,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5187,7 +5187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5210,7 +5210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5233,7 +5233,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5256,7 +5256,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5279,7 +5279,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5302,7 +5302,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5325,7 +5325,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5348,7 +5348,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5371,7 +5371,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5394,7 +5394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5417,7 +5417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5440,7 +5440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5463,7 +5463,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5486,7 +5486,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5509,7 +5509,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5532,7 +5532,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5555,7 +5555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5578,7 +5578,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5601,7 +5601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5624,7 +5624,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5647,7 +5647,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5670,7 +5670,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5693,7 +5693,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5716,7 +5716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5739,7 +5739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5762,7 +5762,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5785,7 +5785,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5808,7 +5808,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5831,7 +5831,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5854,7 +5854,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5877,7 +5877,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5900,7 +5900,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5923,7 +5923,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5946,7 +5946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5969,7 +5969,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5992,7 +5992,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6015,7 +6015,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6038,7 +6038,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6061,7 +6061,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6084,7 +6084,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6107,7 +6107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6130,7 +6130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
@@ -6153,7 +6153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
index 616c81b..5099a4c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
     <vscale x 1 x i8> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
     <vscale x 2 x i8> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
     <vscale x 4 x i8> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64(
     <vscale x 8 x i8> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64(
     <vscale x 1 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64(
     <vscale x 2 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64(
     <vscale x 8 x i16> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64(
     <vscale x 1 x half> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64(
     <vscale x 2 x half> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64(
     <vscale x 4 x half> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64(
     <vscale x 8 x half> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64(
     <vscale x 1 x float> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64(
     <vscale x 2 x float> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64(
     <vscale x 4 x float> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64(
     <vscale x 8 x float> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64(
     <vscale x 1 x double> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
     <vscale x 2 x double> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
     <vscale x 4 x double> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
     <vscale x 8 x double> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
     <vscale x 1 x i8> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
     <vscale x 2 x i8> %0,
@@ -1392,7 +1392,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1415,7 +1415,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
     <vscale x 4 x i8> %0,
@@ -1438,7 +1438,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
     <vscale x 8 x i8> %0,
@@ -1484,7 +1484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
     <vscale x 16 x i8> %0,
@@ -1530,7 +1530,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1553,7 +1553,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
@@ -1576,7 +1576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
@@ -1622,7 +1622,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1645,7 +1645,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
@@ -1668,7 +1668,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1691,7 +1691,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
@@ -1714,7 +1714,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1737,7 +1737,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
@@ -1760,7 +1760,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1783,7 +1783,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -1806,7 +1806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1829,7 +1829,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -1852,7 +1852,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1875,7 +1875,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -1898,7 +1898,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1921,7 +1921,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -1944,7 +1944,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1967,7 +1967,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -1990,7 +1990,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2013,7 +2013,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -2036,7 +2036,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2059,7 +2059,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -2082,7 +2082,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2105,7 +2105,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -2128,7 +2128,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2151,7 +2151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -2174,7 +2174,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2197,7 +2197,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
     <vscale x 1 x half> %0,
@@ -2220,7 +2220,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2243,7 +2243,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
     <vscale x 2 x half> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2289,7 +2289,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
     <vscale x 4 x half> %0,
@@ -2312,7 +2312,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2335,7 +2335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
     <vscale x 8 x half> %0,
@@ -2358,7 +2358,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2381,7 +2381,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
     <vscale x 16 x half> %0,
@@ -2404,7 +2404,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2427,7 +2427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
     <vscale x 1 x float> %0,
@@ -2450,7 +2450,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
     <vscale x 2 x float> %0,
@@ -2496,7 +2496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2519,7 +2519,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
     <vscale x 4 x float> %0,
@@ -2542,7 +2542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2565,7 +2565,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
     <vscale x 8 x float> %0,
@@ -2588,7 +2588,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2611,7 +2611,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
     <vscale x 16 x float> %0,
@@ -2634,7 +2634,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2657,7 +2657,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
     <vscale x 1 x double> %0,
@@ -2680,7 +2680,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2703,7 +2703,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
     <vscale x 2 x double> %0,
@@ -2726,7 +2726,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2749,7 +2749,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
     <vscale x 4 x double> %0,
@@ -2772,7 +2772,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2795,7 +2795,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
     <vscale x 8 x double> %0,
@@ -2818,7 +2818,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2841,7 +2841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
@@ -2864,7 +2864,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2887,7 +2887,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
@@ -2910,7 +2910,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2933,7 +2933,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
@@ -2956,7 +2956,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -2979,7 +2979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
@@ -3002,7 +3002,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3025,7 +3025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
@@ -3048,7 +3048,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3071,7 +3071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
@@ -3094,7 +3094,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3117,7 +3117,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -3140,7 +3140,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3163,7 +3163,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -3186,7 +3186,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3209,7 +3209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -3232,7 +3232,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3255,7 +3255,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -3278,7 +3278,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3301,7 +3301,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -3324,7 +3324,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3347,7 +3347,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -3370,7 +3370,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3393,7 +3393,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -3416,7 +3416,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3439,7 +3439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -3462,7 +3462,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3485,7 +3485,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -3508,7 +3508,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3531,7 +3531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -3554,7 +3554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3577,7 +3577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -3600,7 +3600,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3623,7 +3623,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %0,
@@ -3646,7 +3646,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3669,7 +3669,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %0,
@@ -3692,7 +3692,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3715,7 +3715,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %0,
@@ -3738,7 +3738,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3761,7 +3761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %0,
@@ -3784,7 +3784,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3807,7 +3807,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
     <vscale x 1 x half> %0,
@@ -3830,7 +3830,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3853,7 +3853,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
     <vscale x 2 x half> %0,
@@ -3876,7 +3876,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3899,7 +3899,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
     <vscale x 4 x half> %0,
@@ -3922,7 +3922,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3945,7 +3945,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
     <vscale x 8 x half> %0,
@@ -3968,7 +3968,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -3991,7 +3991,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
     <vscale x 16 x half> %0,
@@ -4014,7 +4014,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4037,7 +4037,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
     <vscale x 32 x half> %0,
@@ -4060,7 +4060,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4083,7 +4083,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
     <vscale x 1 x float> %0,
@@ -4106,7 +4106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4129,7 +4129,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
     <vscale x 2 x float> %0,
@@ -4152,7 +4152,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4175,7 +4175,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
     <vscale x 4 x float> %0,
@@ -4198,7 +4198,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4221,7 +4221,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
     <vscale x 8 x float> %0,
@@ -4244,7 +4244,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4267,7 +4267,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
     <vscale x 16 x float> %0,
@@ -4290,7 +4290,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4313,7 +4313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
     <vscale x 1 x double> %0,
@@ -4336,7 +4336,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4359,7 +4359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
     <vscale x 2 x double> %0,
@@ -4382,7 +4382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4405,7 +4405,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
     <vscale x 4 x double> %0,
@@ -4428,7 +4428,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4451,7 +4451,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
     <vscale x 8 x double> %0,
@@ -4474,7 +4474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4497,7 +4497,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -4520,7 +4520,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4543,7 +4543,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -4566,7 +4566,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4589,7 +4589,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -4612,7 +4612,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4635,7 +4635,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -4658,7 +4658,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4681,7 +4681,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -4704,7 +4704,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4727,7 +4727,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -4750,7 +4750,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4773,7 +4773,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -4796,7 +4796,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4819,7 +4819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -4842,7 +4842,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4865,7 +4865,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -4888,7 +4888,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4911,7 +4911,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -4934,7 +4934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4957,7 +4957,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -4980,7 +4980,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5003,7 +5003,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -5026,7 +5026,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5049,7 +5049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -5072,7 +5072,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5095,7 +5095,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %0,
@@ -5118,7 +5118,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5141,7 +5141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %0,
@@ -5164,7 +5164,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5187,7 +5187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %0,
@@ -5210,7 +5210,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5233,7 +5233,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %0,
@@ -5256,7 +5256,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5279,7 +5279,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %0,
@@ -5302,7 +5302,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5325,7 +5325,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %0,
@@ -5348,7 +5348,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5371,7 +5371,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %0,
@@ -5394,7 +5394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5417,7 +5417,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %0,
@@ -5440,7 +5440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5463,7 +5463,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %0,
@@ -5486,7 +5486,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5509,7 +5509,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
     <vscale x 1 x half> %0,
@@ -5532,7 +5532,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5555,7 +5555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
     <vscale x 2 x half> %0,
@@ -5578,7 +5578,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5601,7 +5601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
     <vscale x 4 x half> %0,
@@ -5624,7 +5624,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5647,7 +5647,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
     <vscale x 8 x half> %0,
@@ -5670,7 +5670,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5693,7 +5693,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
     <vscale x 16 x half> %0,
@@ -5716,7 +5716,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5739,7 +5739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
     <vscale x 32 x half> %0,
@@ -5762,7 +5762,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5785,7 +5785,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
     <vscale x 1 x float> %0,
@@ -5808,7 +5808,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5831,7 +5831,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
     <vscale x 2 x float> %0,
@@ -5854,7 +5854,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5877,7 +5877,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
     <vscale x 4 x float> %0,
@@ -5900,7 +5900,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5923,7 +5923,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
     <vscale x 8 x float> %0,
@@ -5946,7 +5946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5969,7 +5969,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
     <vscale x 16 x float> %0,
@@ -5992,7 +5992,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6015,7 +6015,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
     <vscale x 1 x double> %0,
@@ -6038,7 +6038,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6061,7 +6061,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
     <vscale x 2 x double> %0,
@@ -6084,7 +6084,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6107,7 +6107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
     <vscale x 4 x double> %0,
@@ -6130,7 +6130,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
@@ -6153,7 +6153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
     <vscale x 8 x double> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
index 0ff0665..f6eda49 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
index 4e0111f..b4840b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
index 0fe3f0d..c7a2153 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@
 define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
index 2a86c6b..3eb86f8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@
 define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwadd.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
index ec449f6..292c76b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
index dc150cb..1a4557b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
index 25304be..c582ab3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@
 define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
index 8f370e4..8588f38 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@
 define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwaddu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
index b06c422b..127bb02 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
index 94773ad..32a0726a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmacc.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
index da6af93..9998067 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
index 1a48799..829fce1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccsu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
index 277595b..406b21f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
index 3822198..9e9951d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -840,7 +840,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -863,7 +863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -886,7 +886,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -932,7 +932,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -955,7 +955,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -978,7 +978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1001,7 +1001,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1070,7 +1070,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1116,7 +1116,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1162,7 +1162,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1185,7 +1185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1208,7 +1208,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1231,7 +1231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1254,7 +1254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1277,7 +1277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1300,7 +1300,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1323,7 +1323,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1346,7 +1346,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1369,7 +1369,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
index 8ccc25a..7d6f14e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
   <vscale x 1 x i16>,
   i8,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
index 89fbadc..4fcdd03 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
   <vscale x 1 x i16>,
   i8,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmaccus.vx v8, a0, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
index dc55be7..0969d41 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
index 43aafad..6eca943 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmul.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmul.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmul.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
index 9728f7e..7a3bd74 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
index 61b9a58..e24cec1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulsu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
index d2d4bd3..1342e06 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
index 0d18165..cdebffc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwmulu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
index 2f8799e..e2dd028 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
   <vscale x 4 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
     <vscale x 4 x i16> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8(
     <vscale x 4 x i16> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8(
     <vscale x 4 x i16> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8(
     <vscale x 4 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8(
     <vscale x 4 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16(
     <vscale x 2 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16(
     <vscale x 2 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32(
     <vscale x 1 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32(
     <vscale x 1 x i64> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.nxv1i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
index 3b81965..2f436a3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
   <vscale x 4 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
     <vscale x 4 x i16> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8(
     <vscale x 4 x i16> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8(
     <vscale x 4 x i16> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8(
     <vscale x 4 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8(
     <vscale x 4 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16(
     <vscale x 2 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16(
     <vscale x 2 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32(
     <vscale x 1 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32(
     <vscale x 1 x i64> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.nxv1i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
index 5894683..58e8c18 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
   <vscale x 4 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
     <vscale x 4 x i16> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8(
     <vscale x 4 x i16> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8(
     <vscale x 4 x i16> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8(
     <vscale x 4 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8(
     <vscale x 4 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16(
     <vscale x 2 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16(
     <vscale x 2 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32(
     <vscale x 1 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32(
     <vscale x 1 x i64> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.nxv1i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
index 9370d64..9a80e4e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
   <vscale x 4 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
     <vscale x 4 x i16> %0,
@@ -35,7 +35,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -58,7 +58,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8(
     <vscale x 4 x i16> %0,
@@ -81,7 +81,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -104,7 +104,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -127,7 +127,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -150,7 +150,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8(
     <vscale x 4 x i16> %0,
@@ -173,7 +173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -196,7 +196,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8(
     <vscale x 4 x i16> %0,
@@ -219,7 +219,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -242,7 +242,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8(
     <vscale x 4 x i16> %0,
@@ -265,7 +265,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -288,7 +288,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8(
     <vscale x 4 x i16> %0,
@@ -311,7 +311,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -334,7 +334,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16(
     <vscale x 2 x i32> %0,
@@ -357,7 +357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -380,7 +380,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -403,7 +403,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -426,7 +426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16(
     <vscale x 2 x i32> %0,
@@ -449,7 +449,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -472,7 +472,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16(
     <vscale x 2 x i32> %0,
@@ -495,7 +495,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16(
     <vscale x 2 x i32> %0,
@@ -541,7 +541,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16(
     <vscale x 2 x i32> %0,
@@ -587,7 +587,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -610,7 +610,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -633,7 +633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -656,7 +656,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32(
     <vscale x 1 x i64> %0,
@@ -679,7 +679,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -702,7 +702,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32(
     <vscale x 1 x i64> %0,
@@ -725,7 +725,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -748,7 +748,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32(
     <vscale x 1 x i64> %0,
@@ -771,7 +771,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -794,7 +794,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32(
     <vscale x 1 x i64> %0,
@@ -817,7 +817,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vwredsumu.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.nxv1i64(
     <vscale x 1 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
index 3c5801d..57c9256 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
index ec7545a..493e597 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
index e7abd4f..b34905a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@
 define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
index 3c23315..727c92e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@
 define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsub.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
index 72aad42..3bb39c4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
index 298e3c8..65dab01 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -259,7 +259,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -282,7 +282,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -304,7 +304,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -349,7 +349,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -372,7 +372,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -394,7 +394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -417,7 +417,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -439,7 +439,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -484,7 +484,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -507,7 +507,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -529,7 +529,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -552,7 +552,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v26, v8, v9
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -574,7 +574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v10, v11, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -597,7 +597,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v28, v8, v10
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -619,7 +619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v12, v14, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -642,7 +642,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vv v16, v8, v12
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -664,7 +664,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vv v8, v16, v20, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -687,7 +687,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -709,7 +709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8(
     <vscale x 1 x i16> %0,
@@ -732,7 +732,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -754,7 +754,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8(
     <vscale x 4 x i16> %0,
@@ -822,7 +822,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -844,7 +844,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8(
     <vscale x 8 x i16> %0,
@@ -867,7 +867,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -889,7 +889,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8(
     <vscale x 16 x i16> %0,
@@ -912,7 +912,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -934,7 +934,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8(
     <vscale x 32 x i16> %0,
@@ -957,7 +957,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -979,7 +979,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16(
     <vscale x 1 x i32> %0,
@@ -1002,7 +1002,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1024,7 +1024,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16(
     <vscale x 2 x i32> %0,
@@ -1047,7 +1047,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1069,7 +1069,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16(
     <vscale x 4 x i32> %0,
@@ -1092,7 +1092,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1114,7 +1114,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16(
     <vscale x 8 x i32> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16(
     <vscale x 16 x i32> %0,
@@ -1182,7 +1182,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v25, v8, a0
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1204,7 +1204,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32(
     <vscale x 1 x i64> %0,
@@ -1227,7 +1227,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v26, v8, a0
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1249,7 +1249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32(
     <vscale x 2 x i64> %0,
@@ -1272,7 +1272,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v28, v8, a0
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1294,7 +1294,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32(
     <vscale x 4 x i64> %0,
@@ -1317,7 +1317,7 @@
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.vx v16, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1339,7 +1339,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
index 1242293..3b008ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@
 define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
index 1e83630..a778a44 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
@@ -12,7 +12,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -34,7 +34,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %0,
@@ -57,7 +57,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -79,7 +79,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %0,
@@ -102,7 +102,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -124,7 +124,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %0,
@@ -147,7 +147,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -169,7 +169,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %0,
@@ -192,7 +192,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -214,7 +214,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %0,
@@ -237,7 +237,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -257,10 +257,10 @@
 define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl4re8.v v28, (a0)
+; CHECK-NEXT:    vl4r.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %0,
@@ -283,7 +283,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -305,7 +305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %0,
@@ -328,7 +328,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -350,7 +350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %0,
@@ -373,7 +373,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -395,7 +395,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %0,
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -440,7 +440,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %0,
@@ -463,7 +463,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -486,7 +486,7 @@
 ; CHECK-NEXT:    vl4re16.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %0,
@@ -509,7 +509,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v25, v8, v9
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -531,7 +531,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %0,
@@ -554,7 +554,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v26, v8, v10
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -576,7 +576,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %0,
@@ -599,7 +599,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v28, v8, v12
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -621,7 +621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %0,
@@ -644,7 +644,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wv v24, v8, v16
 ; CHECK-NEXT:    vmv8r.v v8, v24
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    vl4re32.v v28, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wv v8, v16, v28, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %0,
@@ -689,7 +689,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -711,7 +711,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
     <vscale x 1 x i16> %0,
@@ -733,7 +733,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -755,7 +755,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
     <vscale x 2 x i16> %0,
@@ -777,7 +777,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -799,7 +799,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
     <vscale x 4 x i16> %0,
@@ -821,7 +821,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -843,7 +843,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
     <vscale x 8 x i16> %0,
@@ -865,7 +865,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -887,7 +887,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
     <vscale x 16 x i16> %0,
@@ -909,7 +909,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -931,7 +931,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
     <vscale x 32 x i16> %0,
@@ -953,7 +953,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -975,7 +975,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
     <vscale x 1 x i32> %0,
@@ -997,7 +997,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1019,7 +1019,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
     <vscale x 2 x i32> %0,
@@ -1041,7 +1041,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1063,7 +1063,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
     <vscale x 4 x i32> %0,
@@ -1085,7 +1085,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1107,7 +1107,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
     <vscale x 8 x i32> %0,
@@ -1129,7 +1129,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1151,7 +1151,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
     <vscale x 16 x i32> %0,
@@ -1173,7 +1173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1195,7 +1195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32(
     <vscale x 1 x i64> %0,
@@ -1217,7 +1217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1239,7 +1239,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32(
     <vscale x 2 x i64> %0,
@@ -1261,7 +1261,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1283,7 +1283,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32(
     <vscale x 4 x i64> %0,
@@ -1305,7 +1305,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32(
     <vscale x 8 x i64> %0,
@@ -1327,7 +1327,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
index 05073f5..333e50f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1781,7 +1781,7 @@
 ; CHECK-NEXT:    vlse64.v v25, (a0), zero
 ; CHECK-NEXT:    vxor.vv v8, v8, v25
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1810,7 +1810,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v25, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1838,7 +1838,7 @@
 ; CHECK-NEXT:    vlse64.v v26, (a0), zero
 ; CHECK-NEXT:    vxor.vv v8, v8, v26
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1867,7 +1867,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v26, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1895,7 +1895,7 @@
 ; CHECK-NEXT:    vlse64.v v28, (a0), zero
 ; CHECK-NEXT:    vxor.vv v8, v8, v28
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1924,7 +1924,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v28, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1952,7 +1952,7 @@
 ; CHECK-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1981,7 +1981,7 @@
 ; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1998,7 +1998,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2013,7 +2013,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -2030,7 +2030,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2045,7 +2045,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2062,7 +2062,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2077,7 +2077,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2094,7 +2094,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2109,7 +2109,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2126,7 +2126,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2141,7 +2141,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2158,7 +2158,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2173,7 +2173,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2190,7 +2190,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2205,7 +2205,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2222,7 +2222,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2237,7 +2237,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2254,7 +2254,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2269,7 +2269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2286,7 +2286,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2301,7 +2301,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2318,7 +2318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2333,7 +2333,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2350,7 +2350,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2365,7 +2365,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2382,7 +2382,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2397,7 +2397,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2414,7 +2414,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2429,7 +2429,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2446,7 +2446,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2461,7 +2461,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2478,7 +2478,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2493,7 +2493,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2510,7 +2510,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2525,7 +2525,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2542,7 +2542,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2557,7 +2557,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2574,7 +2574,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2589,7 +2589,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2606,7 +2606,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2621,7 +2621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2638,7 +2638,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2653,7 +2653,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2670,7 +2670,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2685,7 +2685,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
index 2024faf..3b150f0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
@@ -11,7 +11,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -33,7 +33,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -55,7 +55,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -77,7 +77,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -99,7 +99,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -121,7 +121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -143,7 +143,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -165,7 +165,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -187,7 +187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -209,7 +209,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -231,7 +231,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -253,7 +253,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -275,7 +275,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -295,10 +295,10 @@
 define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re8.v v24, (a0)
+; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
@@ -320,7 +320,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -342,7 +342,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -364,7 +364,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -386,7 +386,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -408,7 +408,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -430,7 +430,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -452,7 +452,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -474,7 +474,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -518,7 +518,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -540,7 +540,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -563,7 +563,7 @@
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
@@ -585,7 +585,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -607,7 +607,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -629,7 +629,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -651,7 +651,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -695,7 +695,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -717,7 +717,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -739,7 +739,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -761,7 +761,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -784,7 +784,7 @@
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
@@ -806,7 +806,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -828,7 +828,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
@@ -850,7 +850,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v10
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -872,7 +872,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
@@ -894,7 +894,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v12
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -916,7 +916,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
@@ -938,7 +938,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vxor.vv v8, v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -961,7 +961,7 @@
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vxor.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
@@ -983,7 +983,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1005,7 +1005,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1027,7 +1027,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1049,7 +1049,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1071,7 +1071,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1093,7 +1093,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -1115,7 +1115,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1137,7 +1137,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -1159,7 +1159,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1181,7 +1181,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -1203,7 +1203,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1225,7 +1225,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -1247,7 +1247,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1269,7 +1269,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -1291,7 +1291,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1313,7 +1313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -1335,7 +1335,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1357,7 +1357,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1401,7 +1401,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -1423,7 +1423,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1445,7 +1445,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -1467,7 +1467,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1489,7 +1489,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -1511,7 +1511,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1533,7 +1533,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -1555,7 +1555,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1577,7 +1577,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -1599,7 +1599,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1621,7 +1621,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -1643,7 +1643,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1665,7 +1665,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -1687,7 +1687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1709,7 +1709,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -1731,7 +1731,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1753,7 +1753,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -1775,7 +1775,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1797,7 +1797,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -1819,7 +1819,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1841,7 +1841,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -1863,7 +1863,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1885,7 +1885,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -1907,7 +1907,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,ta,mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1929,7 +1929,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m8,tu,mu
 ; CHECK-NEXT:    vxor.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -1946,7 +1946,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1961,7 +1961,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
@@ -1978,7 +1978,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -1993,7 +1993,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
@@ -2010,7 +2010,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2025,7 +2025,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
@@ -2042,7 +2042,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2057,7 +2057,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
@@ -2074,7 +2074,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2089,7 +2089,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
@@ -2106,7 +2106,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2121,7 +2121,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
@@ -2138,7 +2138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
@@ -2170,7 +2170,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2185,7 +2185,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
@@ -2202,7 +2202,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2217,7 +2217,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
@@ -2234,7 +2234,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2249,7 +2249,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
@@ -2266,7 +2266,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2281,7 +2281,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
@@ -2298,7 +2298,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2313,7 +2313,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2345,7 +2345,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
@@ -2362,7 +2362,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2377,7 +2377,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
@@ -2394,7 +2394,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2409,7 +2409,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
@@ -2426,7 +2426,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2441,7 +2441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
@@ -2458,7 +2458,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2473,7 +2473,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
@@ -2490,7 +2490,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2505,7 +2505,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
@@ -2522,7 +2522,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2537,7 +2537,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
     <vscale x 1 x i64> %0,
@@ -2554,7 +2554,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2569,7 +2569,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
     <vscale x 2 x i64> %0,
@@ -2586,7 +2586,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2601,7 +2601,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
     <vscale x 4 x i64> %0,
@@ -2618,7 +2618,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vxor.vi v8, v8, 9
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
     <vscale x 8 x i64> %0,
@@ -2633,7 +2633,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vxor.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
     <vscale x 8 x i64> %0,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll
index 9c3d47a..21a8b34 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
   <vscale x 1 x i8>,
   i32);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %1,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %1,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %1,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %1,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %1,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %1,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %1,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %1,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %1,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %1,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %1,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %1,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %1,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %1,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %1,
@@ -626,7 +626,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -646,7 +646,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %1,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %1,
@@ -708,7 +708,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -728,7 +728,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %1,
@@ -749,7 +749,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -769,7 +769,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %1,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -810,7 +810,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %1,
@@ -831,7 +831,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -851,7 +851,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %1,
@@ -872,7 +872,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -892,7 +892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %1,
@@ -913,7 +913,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -933,7 +933,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %1,
@@ -954,7 +954,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -974,7 +974,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %1,
@@ -995,7 +995,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1015,7 +1015,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %1,
@@ -1036,7 +1036,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1056,7 +1056,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %1,
@@ -1077,7 +1077,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1097,7 +1097,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %1,
@@ -1118,7 +1118,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1138,7 +1138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %1,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll
index 97c09df..abfaa5f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   --riscv-no-aliases < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
   <vscale x 1 x i8>,
   i64);
@@ -11,7 +11,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -31,7 +31,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
     <vscale x 1 x i64> %1,
@@ -52,7 +52,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -72,7 +72,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8(
     <vscale x 2 x i64> %1,
@@ -93,7 +93,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -113,7 +113,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8(
     <vscale x 4 x i64> %1,
@@ -134,7 +134,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf8 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -154,7 +154,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf8 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8(
     <vscale x 8 x i64> %1,
@@ -175,7 +175,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -195,7 +195,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
     <vscale x 1 x i64> %1,
@@ -216,7 +216,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -236,7 +236,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16(
     <vscale x 2 x i64> %1,
@@ -257,7 +257,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -277,7 +277,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16(
     <vscale x 4 x i64> %1,
@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -318,7 +318,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16(
     <vscale x 8 x i64> %1,
@@ -339,7 +339,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -359,7 +359,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8(
     <vscale x 1 x i32> %1,
@@ -380,7 +380,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -400,7 +400,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8(
     <vscale x 2 x i32> %1,
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -441,7 +441,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8(
     <vscale x 4 x i32> %1,
@@ -462,7 +462,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -482,7 +482,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8(
     <vscale x 8 x i32> %1,
@@ -503,7 +503,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf4 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -523,7 +523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8(
     <vscale x 16 x i32> %1,
@@ -544,7 +544,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
     <vscale x 1 x i32> %0,
@@ -564,7 +564,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
     <vscale x 1 x i64> %1,
@@ -585,7 +585,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
     <vscale x 2 x i32> %0,
@@ -605,7 +605,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32(
     <vscale x 2 x i64> %1,
@@ -626,7 +626,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
     <vscale x 4 x i32> %0,
@@ -646,7 +646,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32(
     <vscale x 4 x i64> %1,
@@ -667,7 +667,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
     <vscale x 8 x i32> %0,
@@ -687,7 +687,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32(
     <vscale x 8 x i64> %1,
@@ -708,7 +708,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
     <vscale x 1 x i16> %0,
@@ -728,7 +728,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16(
     <vscale x 1 x i32> %1,
@@ -749,7 +749,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
     <vscale x 2 x i16> %0,
@@ -769,7 +769,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16(
     <vscale x 2 x i32> %1,
@@ -790,7 +790,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
     <vscale x 4 x i16> %0,
@@ -810,7 +810,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16(
     <vscale x 4 x i32> %1,
@@ -831,7 +831,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
     <vscale x 8 x i16> %0,
@@ -851,7 +851,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16(
     <vscale x 8 x i32> %1,
@@ -872,7 +872,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
     <vscale x 16 x i16> %0,
@@ -892,7 +892,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16(
     <vscale x 16 x i32> %1,
@@ -913,7 +913,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
     <vscale x 1 x i8> %0,
@@ -933,7 +933,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8(
     <vscale x 1 x i16> %1,
@@ -954,7 +954,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
     <vscale x 2 x i8> %0,
@@ -974,7 +974,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8(
     <vscale x 2 x i16> %1,
@@ -995,7 +995,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v25, v8
 ; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
     <vscale x 4 x i8> %0,
@@ -1015,7 +1015,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8(
     <vscale x 4 x i16> %1,
@@ -1036,7 +1036,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v26, v8
 ; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
     <vscale x 8 x i8> %0,
@@ -1056,7 +1056,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8(
     <vscale x 8 x i16> %1,
@@ -1077,7 +1077,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v28, v8
 ; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
     <vscale x 16 x i8> %0,
@@ -1097,7 +1097,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8(
     <vscale x 16 x i16> %1,
@@ -1118,7 +1118,7 @@
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
 ; CHECK-NEXT:    vzext.vf2 v16, v8
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
     <vscale x 32 x i8> %0,
@@ -1138,7 +1138,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
 ; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8(
     <vscale x 32 x i16> %1,
diff --git a/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll b/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
index bcfcec5..516ffc0 100644
--- a/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
+++ b/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh \
-; RUN:   -verify-machineinstrs --riscv-no-aliases < %s \
+; RUN:   -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s
 
 declare half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half>)
@@ -24,7 +24,7 @@
 ; CHECK-NEXT:    flh ft0, 14(sp) # 2-byte Folded Reload
 ; CHECK-NEXT:    vfmv.v.f v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half> %0)
   tail call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()
@@ -45,7 +45,7 @@
 ; CHECK-NEXT:    flw ft0, 12(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    vfmv.v.f v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float> %0)
   tail call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()
@@ -66,7 +66,7 @@
 ; CHECK-NEXT:    fld ft0, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    vfmv.v.f v8, ft0
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    jalr zero, 0(ra)
+; CHECK-NEXT:    ret
 entry:
   %a = call double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double> %0)
   tail call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()